From 084bb5bc00c19ec32b45f44d11ba6a0ca2514ec3 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 20 Aug 2014 20:49:54 +0100 Subject: ARM: 8131/1: arm/smp: Absorb boot_secondary() After becoming a mandatory function, boot_secondary() is no longer used outside arch/arm/kernel/smp.c. Hence remove its public prototype, and, as suggested by Arnd, let it be absorbed by its single caller. Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 9388a3d..d19aea4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -95,6 +95,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret; + if (!smp_ops.smp_boot_secondary) + return -ENOSYS; + /* * We need to tell the secondary core where to find * its stack and the page tables. @@ -113,7 +116,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) /* * Now bring the CPU into our world. */ - ret = boot_secondary(cpu, idle); + ret = smp_ops.smp_boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it @@ -142,13 +145,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -int boot_secondary(unsigned int cpu, struct task_struct *idle) -{ - if (smp_ops.smp_boot_secondary) - return smp_ops.smp_boot_secondary(cpu, idle); - return -ENOSYS; -} - int platform_can_cpu_hotplug(void) { #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.1 From a040803a9d6b8c1876d3487a5cb69602ebcbb82c Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 1 Sep 2014 17:14:29 +0100 Subject: ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs Since commit 1dbfa187dad ("ARM: irq migration: force migration off CPU going down") the ARM interrupt migration code on cpu offline calls irqchip.irq_set_affinity() with the argument force=true. At the point of this change the argument had no effect because it was not used by any interrupt chip driver and no semantics were defined. This changed with commit 01f8fa4f01d8 ("genirq: Allow forcing cpu affinity of interrupts") which made the force argument useful to route interrupts to not yet online cpus without checking the target cpu against the cpu online mask. The following commit ffde1de64012 ("irqchip: gic: Support forced affinity setting") implemented this for the GIC interrupt controller. As a consequence the ARM cpu offline irq migration fails if CPU0 is offlined, because CPU0 is still set in the affinity mask and the validation against the cpu online mask is skipped due to the force argument being true. The subsequent first_cpu(mask) selection then always selects CPU0 as the target. Solve the issue by calling irq_set_affinity() with force=false from the CPU offline irq migration code so the GIC driver validates the affinity mask against the CPU online mask and therefore removes CPU0 from the possible target candidates. Tested on TC2 hotplugging CPU0 in and out. Without this patch the system locks up as the IRQs are not migrated away from CPU0.
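For reference, the force-aware target selection that commit ffde1de64012 added to the GIC driver behaves roughly as in the sketch below (condensed from drivers/irqchip/irq-gic.c; details vary between kernel versions):

static int gic_set_affinity(struct irq_data *d,
			    const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	/* force=true deliberately skips the online-mask check so that
	 * interrupts can be routed to not-yet-online CPUs; with
	 * force=false only online CPUs are eligible targets. */
	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program the distributor target register (GICD_ITARGETSRn)
	 * for this interrupt with the selected CPU's interface mask ... */
	return IRQ_SET_MASK_OK;
}

With force=false, cpumask_any_and() can never hand back the CPU that is going down, which is exactly what the offline migration path needs.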
Signed-off-by: Sudeep Holla Acked-by: Thomas Gleixner Acked-by: Mark Rutland Cc: # 3.10.x Signed-off-by: Russell King --- arch/arm/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2c42576..5c4d38e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc) c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; -- cgit v1.1 From e918a62a2ba81d10a3cc2c513dc70034c9524a95 Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Mon, 1 Sep 2014 17:16:01 +0100 Subject: ARM: 8135/1: Fix incorrect barrier usage in SWP{B} emulation According to the ARMv7 ARM, explicit barriers are necessary when using synchronisation primitives such as SWP{B}: the use of these instructions does not automatically imply a barrier, and any ordering requirements by the software must be explicitly expressed with the use of suitable barriers. Since the emulation should match this architected behaviour rather than add ordering the real instruction never provided, remove the barriers from the SWP{B} emulation. Acked-by: Will Deacon Signed-off-by: Punit Agrawal Signed-off-by: Russell King --- arch/arm/kernel/swp_emulate.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 67ca857..587fdfe 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, while (1) { unsigned long temp; - /* - * Barrier required between accessing protected resource and - * releasing a lock for it. Legacy code might not have done - * this, and we cannot determine that this is not the case - * being emulated, so insert always. - */ - smp_mb(); - if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else @@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, } if (res == 0) { - /* - * Barrier also required between acquiring a lock for a - * protected resource and accessing the resource. Inserted for - * same reason as above. - */ - smp_mb(); - if (type == TYPE_SWPB) swpbcounter++; else -- cgit v1.1 From 7a0bd49713aca3040099e1413d1cc9f08802d97a Mon Sep 17 00:00:00 2001 From: Victor Kamensky Date: Sat, 13 Sep 2014 23:29:17 +0100 Subject: ARM: 8151/1: add missing exports for asm functions required by get_user macro Previous commits that extended get_user to 64-bit types failed to export the corresponding helper functions, so when the get_user macro is used with those target/value types from a kernel module, modpost produces an 'undefined!' error. Solve this by exporting all the required functions.
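To illustrate the failure mode (hypothetical module code, not part of this patch): with a 64-bit destination the get_user macro expands to a call to __get_user_8 (or to the __get_user_64t_* variants on big-endian), so a module containing code like the following failed at modpost time with 'undefined!' before these exports were added:

#include <linux/module.h>
#include <linux/uaccess.h>

/* hypothetical example: modular code doing a 64-bit get_user */
static long example_read_u64(const u64 __user *uptr)
{
	u64 val;

	if (get_user(val, uptr))	/* expands to __get_user_8 on ARM */
		return -EFAULT;

	return (long)val;
}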
Signed-off-by: Victor Kamensky Signed-off-by: Russell King --- arch/arm/kernel/armksyms.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f7b450f..a88671c 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); +EXPORT_SYMBOL(__get_user_8); + +#ifdef __ARMEB__ +EXPORT_SYMBOL(__get_user_64t_1); +EXPORT_SYMBOL(__get_user_64t_2); +EXPORT_SYMBOL(__get_user_64t_4); +EXPORT_SYMBOL(__get_user_32t_8); +#endif EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); -- cgit v1.1 From fbfb872f5f417cea48760c535e0ff027c88b507a Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 11 Sep 2014 02:49:08 +0100 Subject: ARM: 8148/1: flush TLS and thumbee register state during exec The TPIDRURO and TPIDRURW registers need to be flushed during exec; otherwise TLS information is potentially leaked. TPIDRURO in particular needs careful treatment. Since flush_thread basically needs the same code used to set the TLS in arm_syscall, pull that into a common set_tls helper in tls.h and use it in both places. Similarly, TEEHBR needs to be cleared during exec as well. Clearing its save slot in thread_info isn't right as there is no guarantee that a thread switch will occur before the new program runs. Just setting the register directly is sufficient. Signed-off-by: Nathan Lynch Acked-by: Will Deacon Cc: Signed-off-by: Russell King --- arch/arm/kernel/process.c | 2 ++ arch/arm/kernel/thumbee.c | 2 +- arch/arm/kernel/traps.c | 17 +---------------- 3 files changed, 4 insertions(+), 17 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 81ef686..a35f6eb 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -334,6 +334,8 @@ void flush_thread(void) memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); + flush_tls(); + thread_notify(THREAD_NOTIFY_FLUSH, thread); } diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c index 7b8403b..80f0d69 100644 --- a/arch/arm/kernel/thumbee.c +++ b/arch/arm/kernel/thumbee.c @@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void switch (cmd) { case THREAD_NOTIFY_FLUSH: - thread->thumbee_state = 0; + teehbr_write(0); break; case THREAD_NOTIFY_SWITCH: current_thread_info()->thumbee_state = teehbr_read(); diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c8e4bb7..a964c9f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags) #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); siginfo_t info; if ((no >> 16) != (__ARM_NR_BASE>> 16)) @@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return regs->ARM_r0; case NR(set_tls): - thread->tp_value[0] = regs->ARM_r0; - if (tls_emu) - return 0; - if (has_tls_reg) { - asm ("mcr p15, 0, %0, c13, c0, 3" - : : "r" (regs->ARM_r0)); - } else { - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. 
- * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; - } + set_tls(regs->ARM_r0); return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG -- cgit v1.1 From 505013bc9065391f09a51d51cd3bf0b06dfb570a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 11 Sep 2014 23:25:30 +0100 Subject: ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts Rob Clark reports a sleeping while atomic bug when using perf. BUG: sleeping function called from invalid context at ../kernel/locking/mutex.c:583 in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/0 ------------[ cut here ]------------ WARNING: CPU: 2 PID: 4828 at ../kernel/locking/mutex.c:479 mutex_lock_nested+0x3a0/0x3e8() DEBUG_LOCKS_WARN_ON(in_interrupt()) Modules linked in: CPU: 2 PID: 4828 Comm: Xorg.bin Tainted: G W 3.17.0-rc3-00234-gd535c45-dirty #819 [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0x98/0xb8) [] (dump_stack) from [] (warn_slowpath_common+0x70/0x8c) [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40) [] (warn_slowpath_fmt) from [] (mutex_lock_nested+0x3a0/0x3e8) [] (mutex_lock_nested) from [] (irq_find_host+0x20/0x9c) [] (irq_find_host) from [] (of_irq_get+0x28/0x48) [] (of_irq_get) from [] (platform_get_irq+0x1c/0x8c) [] (platform_get_irq) from [] (cpu_pmu_enable_percpu_irq+0x14/0x38) [] (cpu_pmu_enable_percpu_irq) from [] (flush_smp_call_function_queue+0x88/0x178) [] (flush_smp_call_function_queue) from [] (handle_IPI+0x88/0x160) [] (handle_IPI) from [] (gic_handle_irq+0x64/0x68) [] (gic_handle_irq) from [] (__irq_svc+0x44/0x5c) Exception stack(0xe63ddea0 to 0xe63ddee8) dea0: 00000001 00000001 00000000 c2f3b200 c16db380 c032d4a0 e63ddf40 60010013 dec0: 00000000 001fbfd4 00000100 00000000 00000001 e63ddee8 c0284770 c02a2e30 dee0: 20010013 ffffffff [] (__irq_svc) from [] (ktime_get_ts64+0x1c8/0x200) [] (ktime_get_ts64) from [] (poll_select_set_timeout+0x60/0xa8) [] (poll_select_set_timeout) from [] (SyS_select+0xa8/0x118) [] (SyS_select) from [] (ret_fast_syscall+0x0/0x48) ---[ end trace 0bb583b46342da6f ]--- INFO: lockdep is turned off. We don't really need to get the platform irq again when we're enabling or disabling the per-cpu irq. Furthermore, we don't really need to set and clear bits in the active_irqs bitmask because that's only used in the non-percpu irq case to figure out when the last CPU PMU has been disabled. Just pass the irq directly to the enable/disable functions to clean all this up. This should be slightly more efficient and also fix the scheduling while atomic bug. 
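Condensed, the fix has the following shape (the full diff appears below): the irq is looked up once in process context, and the cross-call callbacks, which run in IPI context and therefore must not sleep, receive only the already-resolved number:

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	/* IPI context: must not sleep, so no platform_get_irq() here
	 * (it can reach irq_find_host(), which takes a mutex) */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

/* caller, in process context, where sleeping is allowed */
int irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq))
	on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);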
Fixes: bbd64559376f "ARM: perf: support percpu irqs for the CPU PMU" Reported-by: Rob Clark Acked-by: Will Deacon Cc: stable@vger.kernel.org Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/perf_event_cpu.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index e6a6edb..4bf4cce 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) static void cpu_pmu_enable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; enable_percpu_irq(irq, IRQ_TYPE_NONE); - cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs); } static void cpu_pmu_disable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; - cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs); disable_percpu_irq(irq); } @@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) irq = platform_get_irq(pmu_device, 0); if (irq >= 0 && irq_is_percpu(irq)) { - on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &percpu_pmu); } else { for (i = 0; i < irqs; ++i) { @@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) irq); return err; } - on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { err = 0; -- cgit v1.1 From 8b521cb2947d8811b4cf7fc6a7a6ebde35218243 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 16 Sep 2014 20:41:43 +0100 Subject: ARM: 8152/1: Convert pr_warning to pr_warn Use the more common pr_warn. 
Other miscellanea: o Coalesce formats o Realign arguments Signed-off-by: Joe Perches Signed-off-by: Russell King --- arch/arm/kernel/atags_parse.c | 2 +- arch/arm/kernel/hw_breakpoint.c | 18 +++++++++--------- arch/arm/kernel/irq.c | 4 ++-- arch/arm/kernel/perf_event_cpu.c | 4 ++-- arch/arm/kernel/smp.c | 2 +- arch/arm/kernel/unwind.c | 24 ++++++++++++------------ 6 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 7807ef5..528f8af 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -130,7 +130,7 @@ static int __init parse_tag_cmdline(const struct tag *tag) strlcat(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); #elif defined(CONFIG_CMDLINE_FORCE) - pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); + pr_warn("Ignoring tag cmdline (using the default kernel command line)\n"); #else strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 4d963fb..b5b452f 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -113,8 +113,8 @@ static u32 read_wb_reg(int n) GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to read from unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to read from unknown breakpoint register %d\n", + n); } return val; @@ -128,8 +128,8 @@ static void write_wb_reg(int n, u32 val) GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to write to unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to write to unknown breakpoint register %d\n", + n); } isb(); } @@ -292,7 +292,7 @@ int hw_breakpoint_slots(int type) case TYPE_DATA: return get_num_wrps(); default: - pr_warning("unknown slot type: %d\n", type); + pr_warn("unknown slot type: %d\n", type); return 0; } } @@ -365,7 +365,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + pr_warn("Can't find any breakpoint slot\n"); return -EBUSY; } @@ -417,7 +417,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + pr_warn("Can't find any breakpoint slot\n"); return; } @@ -894,8 +894,8 @@ static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) { int cpu = smp_processor_id(); - pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n", - instr, cpu); + pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n", + instr, cpu); /* Set the error flag for this CPU and skip the faulting instruction. 
*/ cpumask_set_cpu(cpu, &debug_err_mask); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2c42576..40cbca6 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -205,8 +205,8 @@ void migrate_irqs(void) raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) - pr_warning("IRQ%u no longer affine to CPU%u\n", i, - smp_processor_id()); + pr_warn("IRQ%u no longer affine to CPU%u\n", + i, smp_processor_id()); } local_irq_restore(flags); diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index e6a6edb..101dbab 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -152,8 +152,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) * continue. Otherwise, continue without this interrupt. */ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); continue; } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index d19aea4..39c74a2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -646,7 +646,7 @@ void smp_send_stop(void) udelay(1); if (num_online_cpus() > 1) - pr_warning("SMP: failed to stop secondary CPUs\n"); + pr_warn("SMP: failed to stop secondary CPUs\n"); } /* diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index a61a1df..cbb85c5 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -157,7 +157,7 @@ static const struct unwind_idx *search_index(unsigned long addr, if (likely(start->addr_offset <= addr_prel31)) return start; else { - pr_warning("unwind: Unknown symbol address %08lx\n", addr); + pr_warn("unwind: Unknown symbol address %08lx\n", addr); return NULL; } } @@ -225,7 +225,7 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) unsigned long ret; if (ctrl->entries <= 0) { - pr_warning("unwind: Corrupt unwind table\n"); + pr_warn("unwind: Corrupt unwind table\n"); return 0; } @@ -333,8 +333,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) insn = (insn << 8) | unwind_get_byte(ctrl); mask = insn & 0x0fff; if (mask == 0) { - pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", - insn); + pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n", + insn); return -URC_FAILURE; } @@ -357,8 +357,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) unsigned long mask = unwind_get_byte(ctrl); if (mask == 0 || mask & 0xf0) { - pr_warning("unwind: Spare encoding %04lx\n", - (insn << 8) | mask); + pr_warn("unwind: Spare encoding %04lx\n", + (insn << 8) | mask); return -URC_FAILURE; } @@ -370,7 +370,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { - pr_warning("unwind: Unhandled instruction %02lx\n", insn); + pr_warn("unwind: Unhandled instruction %02lx\n", insn); return -URC_FAILURE; } @@ -403,7 +403,7 @@ int unwind_frame(struct stackframe *frame) idx = unwind_find_idx(frame->pc); if (!idx) { - pr_warning("unwind: Index not found %08lx\n", frame->pc); + pr_warn("unwind: Index not found %08lx\n", frame->pc); return -URC_FAILURE; } @@ -422,8 +422,8 @@ int unwind_frame(struct stackframe *frame) /* only personality routine 0 supported in the index */ ctrl.insn = &idx->insn; else { - pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", - idx->insn, idx); + pr_warn("unwind: Unsupported personality routine %08lx in the index at 
%p\n", + idx->insn, idx); return -URC_FAILURE; } @@ -435,8 +435,8 @@ int unwind_frame(struct stackframe *frame) ctrl.byte = 1; ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); } else { - pr_warning("unwind: Unsupported personality routine %08lx at %p\n", - *ctrl.insn, ctrl.insn); + pr_warn("unwind: Unsupported personality routine %08lx at %p\n", + *ctrl.insn, ctrl.insn); return -URC_FAILURE; } -- cgit v1.1 From 195b58add463f697fb802ed55e26759094d40a54 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 28 Aug 2014 13:08:14 +0100 Subject: ARM: Avoid writing to control register on every exception If we are not changing the control register value, avoid writing to it. Writes to the control register can be very expensive, taking around a hundred cycles or so. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 10 +++++++--- arch/arm/kernel/entry-common.S | 2 +- arch/arm/kernel/entry-header.S | 16 ++++++++++++---- 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 36276cd..3fe61de 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -321,6 +321,9 @@ ENDPROC(__pabt_svc) ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) + ATRAP( mrc p15, 0, r7, c1, c0, 0) + ATRAP( ldr r8, .LCcralign) + ldmia r0, {r3 - r5} add r0, sp, #S_PC @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" @@ -328,6 +331,8 @@ ENDPROC(__pabt_svc) str r3, [sp] @ save the "real" r0 copied @ from the exception stack + ATRAP( ldr r8, [r8, #0]) + @ @ We are now ready to fill in the remaining blanks on the stack: @ @@ -341,10 +346,9 @@ ENDPROC(__pabt_svc) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) - @ @ Enable the alignment trap while in kernel mode - @ - alignment_trap r0, .LCcralign + ATRAP( teq r8, r7) + ATRAP( mcrne p15, 0, r8, c1, c0, 0) @ @ Clear FP to mark the first stack frame diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e52fe5a..6bb09d4 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -366,7 +366,7 @@ ENTRY(vector_swi) str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif zero_fp - alignment_trap ip, __cr_alignment + alignment_trap r10, ip, __cr_alignment enable_irq ct_user_exit get_thread_info tsk diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 8db307d..7b72939 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -37,11 +37,19 @@ #endif .endm - .macro alignment_trap, rtemp, label #ifdef CONFIG_ALIGNMENT_TRAP - ldr \rtemp, \label - ldr \rtemp, [\rtemp] - mcr p15, 0, \rtemp, c1, c0 +#define ATRAP(x...) x +#else +#define ATRAP(x...) 
+#endif + + .macro alignment_trap, rtmp1, rtmp2, label +#ifdef CONFIG_ALIGNMENT_TRAP + mrc p15, 0, \rtmp2, c1, c0, 0 + ldr \rtmp1, \label + ldr \rtmp1, [\rtmp1] + teq \rtmp1, \rtmp2 + mcrne p15, 0, \rtmp1, c1, c0, 0 #endif .endm -- cgit v1.1 From 02e0409a65560da66a747d2ac6023715b04659ea Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 22 Sep 2014 22:08:42 +0100 Subject: ARM: 8154/1: use _install_special_mapping for sigpage _install_special_mapping allows the VMA to be identified in /proc/pid/maps without the use of arch_vma_name, providing a slight net reduction in object size: text data bss dec hex filename 2996 96 144 3236 ca4 arch/arm/kernel/process.o (before) 2956 104 144 3204 c84 arch/arm/kernel/process.o (after) Signed-off-by: Nathan Lynch Reviewed-by: Kees Cook Signed-off-by: Russell King --- arch/arm/kernel/process.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 81ef686..46fbbb3 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -472,19 +472,23 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - return is_gate_vma(vma) ? "[vectors]" : - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? - "[sigpage]" : NULL; + return is_gate_vma(vma) ? "[vectors]" : NULL; } static struct page *signal_page; extern struct page *get_signal_page(void); +static const struct vm_special_mapping sigpage_mapping = { + .name = "[sigpage]", + .pages = &signal_page, +}; + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; unsigned long addr; - int ret; + int ret = 0; if (!signal_page) signal_page = get_signal_page(); @@ -498,12 +502,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - ret = install_special_mapping(mm, addr, PAGE_SIZE, + vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - &signal_page); + &sigpage_mapping); + + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto up_fail; + } - if (ret == 0) - mm->context.sigpage = addr; + mm->context.sigpage = addr; up_fail: up_write(&mm->mmap_sem); -- cgit v1.1 From 389522b0c0530658eb9f9a53410ec2494616d785 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 22 Sep 2014 22:12:35 +0100 Subject: ARM: 8155/1: place sigpage at a random offset above stack The sigpage is currently placed alongside shared libraries etc in the address space. Similar to what x86_64 does for its VDSO, place the sigpage at a randomized offset above the stack so that learning the base address of the sigpage doesn't help expose where shared libraries are loaded in the address space (and vice versa). Signed-off-by: Nathan Lynch Reviewed-by: Kees Cook Signed-off-by: Russell King --- arch/arm/kernel/process.c | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 46fbbb3..edd2ac3 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -475,6 +475,39 @@ const char *arch_vma_name(struct vm_area_struct *vma) return is_gate_vma(vma) ? "[vectors]" : NULL; } +/* If possible, provide a placement hint at a random offset from the + * stack for the signal page.
+ */ +static unsigned long sigpage_addr(const struct mm_struct *mm, + unsigned int npages) +{ + unsigned long offset; + unsigned long first; + unsigned long last; + unsigned long addr; + unsigned int slots; + + first = PAGE_ALIGN(mm->start_stack); + + last = TASK_SIZE - (npages << PAGE_SHIFT); + + /* No room after stack? */ + if (first > last) + return 0; + + /* Just enough room? */ + if (first == last) + return first; + + slots = ((last - first) >> PAGE_SHIFT) + 1; + + offset = get_random_int() % slots; + + addr = first + (offset << PAGE_SHIFT); + + return addr; +} + static struct page *signal_page; extern struct page *get_signal_page(void); @@ -488,6 +521,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr; + unsigned long hint; int ret = 0; if (!signal_page) @@ -496,7 +530,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -ENOMEM; down_write(&mm->mmap_sem); - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + hint = sigpage_addr(mm, 1); + addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; -- cgit v1.1 From aeea3592a13bf12861943e44fc48f1f270941f8d Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 24 Sep 2014 01:06:46 +0100 Subject: ARM: 8158/1: LLVMLinux: use static inline in ARM ftrace.h With compilers which follow the C99 standard (like modern versions of gcc and clang), "extern inline" does the wrong thing (emits code for an externally linkable version of the inline function). In this case using static inline and removing the NULL version of return_address in return_address.c does the right thing. Signed-off-by: Behan Webster Reviewed-by: Mark Charlebois Acked-by: Steven Rostedt Signed-off-by: Russell King --- arch/arm/kernel/return_address.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index fafedd8..f6aa84d 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -63,11 +63,6 @@ void *return_address(unsigned int level) #warning "TODO: return_address should use unwind tables" #endif -void *return_address(unsigned int level) -{ - return NULL; -} - #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ EXPORT_SYMBOL_GPL(return_address); -- cgit v1.1 From e16343c47e4276f5ebc77ca16feb5e50ca1918f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 24 Sep 2014 08:51:57 +0100 Subject: ARM: 8160/1: drop warning about return_address not using unwind tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The warning was introduced in 2009 (commit 4bf1fa5a34aa ([ARM] 5613/1: implement CALLER_ADDRESSx)). The only "problem" here is that CALLER_ADDRESSx for x > 1 returns NULL which doesn't do much harm. The drawback of implementing a fix (i.e. use unwind tables to implement CALLER_ADDRESSx) is that much of the unwinder code would need to be marked as not traceable. 
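For context, after the previous patch the relevant part of arch/arm/include/asm/ftrace.h looks roughly like this (a sketch, not the verbatim header):

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/* real implementation in return_address.c walks the frame pointers */
void *return_address(unsigned int);
#else
static inline void *return_address(unsigned int level)
{
	/* no frame pointers and no unwind-based walker: callers asking
	 * for levels above 1 simply see NULL, which is harmless */
	return NULL;
}
#endif

#define ftrace_return_address(n) return_address(n)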
Signed-off-by: Uwe Kleine-König Signed-off-by: Russell King --- arch/arm/kernel/return_address.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index f6aa84d..98ea4b7 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -59,10 +59,6 @@ void *return_address(unsigned int level) #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ -#if defined(CONFIG_ARM_UNWIND) -#warning "TODO: return_address should use unwind tables" -#endif - #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ EXPORT_SYMBOL_GPL(return_address); -- cgit v1.1 From ad684dce87fac52738649e62b4afa25081b52a28 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Tue, 30 Sep 2014 10:25:10 +0100 Subject: ARM: 8179/1: kprobes-test: Fix compile error "bad immediate value for offset" When compiling kprobes-test-arm.c the following error has been observed: /tmp/ccoT403o.s:21439: Error: bad immediate value for offset (4168) This is caused by the compiler spilling its literal pool too far away from the site which is trying to reference it with a PC relative load. This arises because the compiler is underestimating the size of the inline assembler code present, which apparently it approximates as 4 bytes per line or instruction. We fix this problem by moving the operations which generate more than 4 bytes out of the text section. Specifically, moving the .ascii directives to the .rodata section. Signed-off-by: Jon Medhurst Signed-off-by: Russell King --- arch/arm/kernel/kprobes-test.c | 16 +++++++++------- arch/arm/kernel/kprobes-test.h | 5 ++++- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c index 08d7312..b206d77 100644 --- a/arch/arm/kernel/kprobes-test.c +++ b/arch/arm/kernel/kprobes-test.c @@ -110,10 +110,13 @@ * * @ TESTCASE_START * bl __kprobes_test_case_start - * @ start of inline data... + * .pushsection .rodata + * "10: * .ascii "mov r0, r7" @ text title for test case * .byte 0 - * .align 2, 0 + * .popsection + * @ start of inline data...
+ * .word 10b @ pointer to title in .rodata section * * @ TEST_ARG_REG * .byte ARG_TYPE_REG @@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void) __asm__ __volatile__ ( "stmdb sp!, {r4-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "bic r0, lr, #1 @ r0 = inline title string \n\t" + "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" "bl kprobes_test_case_start \n\t" "bx r0 \n\t" @@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc) return pc + 4; } -static uintptr_t __used kprobes_test_case_start(const char *title, void *stack) +static uintptr_t __used kprobes_test_case_start(const char **title, void *stack) { struct test_arg *args; struct test_arg_end *end_arg; unsigned long test_code; - args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4); - - current_title = title; + current_title = *title++; + args = (struct test_arg *)title; current_args = args; current_stack = stack; diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index eecc90a..4430990 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h @@ -111,11 +111,14 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ "bl __kprobes_test_case_start \n\t" \ + ".pushsection .rodata \n\t" \ + "10: \n\t" \ /* don't use .asciz here as 'title' may be */ \ /* multiple strings to be concatenated. */ \ ".ascii "#title" \n\t" \ ".byte 0 \n\t" \ - ".align 2, 0 \n\t" + ".popsection \n\t" \ + ".word 10b \n\t" #define TEST_ARG_REG(reg, val) \ ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ -- cgit v1.1 From 562c85cadb065e33ec9f651b8d41cdfd3054a5d0 Mon Sep 17 00:00:00 2001 From: Yalin Wang Date: Fri, 26 Sep 2014 03:30:59 +0100 Subject: ARM: 8168/1: extend __init_end to a page-aligned address This patch changes the __init_end address to a page-aligned address, so that free_initmem() can free the whole .init section: if the end address is not page aligned, free_initmem() rounds it down to a page-aligned address, and the unaligned tail page is never freed. Signed-off-by: wang Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 6f57cb9..8e95aa4 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -219,8 +219,8 @@ SECTIONS __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else - __init_end = .; . = ALIGN(THREAD_SIZE); + __init_end = .; __data_loc = .; #endif -- cgit v1.1
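The rounding behaviour this patch works around lives in the generic helper that free_initmem() ends up calling; a simplified sketch modelled on mm/page_alloc.c's free_reserved_area() (details differ across kernel versions):

unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
	void *pos;
	unsigned long pages = 0;

	/* start is rounded up and end rounded down to page boundaries,
	 * so without a page-aligned __init_end the tail page of .init
	 * would silently stay reserved */
	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);

	for (pos = start; pos < end; pos += PAGE_SIZE, pages++)
		free_reserved_page(virt_to_page(pos));

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n",
			s, pages << (PAGE_SHIFT - 10));

	return pages;
}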