From 6468178767c4c7e600fdbf50aaca03f4a8a82ca4 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 12 Nov 2013 17:11:53 +0000
Subject: arm64: let the core code deal with preempt_count

Commit f27dde8deef3 (sched: Add NEED_RESCHED to the preempt_count)
introduced the use of bit 31 in preempt_count for obscure scheduling
purposes.

This causes interrupts taken from EL0 to hit the (open coded) BUG when
this flag is flipped while handling the interrupt (we compare the
values before and after, and kill the kernel if they are different).

The fix is to stop messing with the preempt count entirely, as this is
already being dealt with in the generic code (irq_enter/irq_exit).

Tested on a dual A53 FPGA running cyclictest.

Signed-off-by: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S | 29 +++++++----------------------
 1 file changed, 7 insertions(+), 22 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e116614..4d2c6f3 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,15 +309,12 @@ el1_irq:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-#ifdef CONFIG_PREEMPT
-	get_thread_info tsk
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w0, w24, #1			// increment it
-	str	w0, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
+
 #ifdef CONFIG_PREEMPT
-	str	w24, [tsk, #TI_PREEMPT]		// restore preempt count
+	get_thread_info tsk
+	ldr	w24, [tsk, #TI_PREEMPT]		// restore preempt count
 	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
@@ -507,22 +504,10 @@ el0_irq_naked:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w23, w24, #1			// increment it
-	str	w23, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	w0, [tsk, #TI_PREEMPT]
-	str	w24, [tsk, #TI_PREEMPT]
-	cmp	w0, w23
-	b.eq	1f
-	mov	x1, #0
-	str	x1, [x1]			// BUG
-1:
-#endif
+	get_thread_info tsk
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
 #endif
--
cgit v1.1

From b3bf6aa7e79117419f7eddccf0b7af4382d823c3 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 21 Nov 2013 14:46:17 +0000
Subject: arm64: Unmask asynchronous aborts when in kernel mode

Asynchronous aborts are generally fatal for the kernel but they can be
masked via the PSTATE A bit. If a system error happens while in kernel
mode, it won't be visible until returning to user space. This patch
enables this kind of abort early to help identify the cause.

Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/setup.c | 5 +++++
 arch/arm64/kernel/smp.c   | 1 +
 2 files changed, 6 insertions(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4c..bd9bbd0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
 {
+	/*
+	 * Unmask asynchronous aborts early to catch possible system errors.
+	 */
+	local_async_enable();
+
 	setup_processor();
 
 	setup_machine_fdt(__fdt_pointer);
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefa..a0c2ca6 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
 
 	local_irq_enable();
 	local_fiq_enable();
+	local_async_enable();
 
 	/*
 	 * OK, it's off to the idle thread for us
--
cgit v1.1

From 6a2e5e521c333a0b56cb60dc5587e3f90859c5e7 Mon Sep 17 00:00:00 2001
From: Matthew Leach
Date: Thu, 28 Nov 2013 12:07:22 +0000
Subject: arm64: ptrace: fix compat registers get/set to be endian clean

On a BE system the wrong half of the X registers is retrieved/written
when attempting to get/set the value of aarch32 registers through
ptrace.

Ensure that types are the correct width so that the relevant casting
occurs.

Signed-off-by: Matthew Leach
Reviewed-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 40 +++++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7..6777a21 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -636,28 +636,27 @@ static int compat_gpr_get(struct task_struct *target,
 
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&task_pt_regs(target)->pc;
+			reg = task_pt_regs(target)->pc;
 			break;
 		case 16:
-			reg = (void *)&task_pt_regs(target)->pstate;
+			reg = task_pt_regs(target)->pstate;
 			break;
 		case 17:
-			reg = (void *)&task_pt_regs(target)->orig_x0;
+			reg = task_pt_regs(target)->orig_x0;
 			break;
 		default:
-			reg = (void *)&task_pt_regs(target)->regs[idx];
+			reg = task_pt_regs(target)->regs[idx];
 		}
 
-		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+		ret = copy_to_user(ubuf, &reg, sizeof(reg));
 		if (ret)
 			break;
-		else
-			ubuf += sizeof(compat_ulong_t);
+
+		ubuf += sizeof(reg);
 	}
 
 	return ret;
@@ -685,28 +684,28 @@ static int compat_gpr_set(struct task_struct *target,
 
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
+
+		ret = copy_from_user(&reg, ubuf, sizeof(reg));
+		if (ret)
+			return ret;
+
+		ubuf += sizeof(reg);
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&newregs.pc;
+			newregs.pc = reg;
 			break;
 		case 16:
-			reg = (void *)&newregs.pstate;
+			newregs.pstate = reg;
 			break;
 		case 17:
-			reg = (void *)&newregs.orig_x0;
+			newregs.orig_x0 = reg;
 			break;
 		default:
-			reg = (void *)&newregs.regs[idx];
+			newregs.regs[idx] = reg;
 		}
 
-		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
-		if (ret)
-			goto out;
-		else
-			ubuf += sizeof(compat_ulong_t);
 	}
 
 	if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +713,6 @@ static int compat_gpr_set(struct task_struct *target,
 	else
 		ret = -EINVAL;
 
-out:
 	return ret;
 }
 
--
cgit v1.1

From 2dacab73dc9f86ad12eb41bc3355d7f492696bca Mon Sep 17 00:00:00 2001
From: Matthew Leach
Date: Thu, 28 Nov 2013 12:07:23 +0000
Subject: arm64: debug: make aarch32 bkpt checking endian clean

The current breakpoint instruction checking code for A32 is not endian
clean. Fix this with appropriate byte-swapping when retrieving
instructions.
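To illustrate the failure mode, here is a small stand-alone C sketch
(illustrative only, not part of the patch). AArch32 instructions are
always little-endian in memory, so a big-endian kernel must byte-swap
what get_user() fetches before comparing. The AARCH32_BREAK_ARM value
and the 0xe7f001f0 breakpoint encoding below are taken from the
kernel's debug-monitors definitions and are assumptions to verify
against your tree; the le32() helper is ad hoc:

#include <stdint.h>
#include <stdio.h>

#define AARCH32_BREAK_ARM	0x07f001f0u	/* assumed kernel value */

/* Rebuild a little-endian 32-bit word byte by byte; this is what
 * le32_to_cpu() boils down to on a big-endian kernel. */
static uint32_t le32(const uint8_t *b)
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* An A32 breakpoint instruction as it sits in memory (LE bytes). */
	const uint8_t insn[4] = { 0xf0, 0x01, 0xf0, 0xe7 };
	uint32_t v = le32(insn);

	/* Same mask-and-compare as aarch32_break_handler(); the condition
	 * field (top nibble) is ignored. */
	printf("breakpoint: %s\n",
	       (v & ~0xf0000000u) == AARCH32_BREAK_ARM ? "yes" : "no");
	return 0;
}

A plain big-endian load of those bytes yields 0xf001f0e7, which never
matches the pattern; that is exactly the bug being fixed.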
Signed-off-by: Matthew Leach
Reviewed-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/debug-monitors.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 6a0a9b1..4ae6857 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 int aarch32_break_handler(struct pt_regs *regs)
 {
 	siginfo_t info;
-	unsigned int instr;
+	u32 arm_instr;
+	u16 thumb_instr;
 	bool bp = false;
 	void __user *pc = (void __user *)instruction_pointer(regs);
 
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
 
 	if (compat_thumb_mode(regs)) {
 		/* get 16-bit Thumb instruction */
-		get_user(instr, (u16 __user *)pc);
-		if (instr == AARCH32_BREAK_THUMB2_LO) {
+		get_user(thumb_instr, (u16 __user *)pc);
+		thumb_instr = le16_to_cpu(thumb_instr);
+		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
 			/* get second half of 32-bit Thumb-2 instruction */
-			get_user(instr, (u16 __user *)(pc + 2));
-			bp = instr == AARCH32_BREAK_THUMB2_HI;
+			get_user(thumb_instr, (u16 __user *)(pc + 2));
+			thumb_instr = le16_to_cpu(thumb_instr);
+			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
 		} else {
-			bp = instr == AARCH32_BREAK_THUMB;
+			bp = thumb_instr == AARCH32_BREAK_THUMB;
 		}
 	} else {
 		/* 32-bit ARM instruction */
-		get_user(instr, (u32 __user *)pc);
-		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+		get_user(arm_instr, (u32 __user *)pc);
+		arm_instr = le32_to_cpu(arm_instr);
+		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
 	}
 
 	if (!bp)
--
cgit v1.1

From 85cc00eaa81dfa0f5bf8076c48f3ee2c2c4a77ba Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Mon, 18 Nov 2013 18:56:42 +0000
Subject: arm64: kernel: add code to set cpu boot mode to secondary_entry shim

The refactoring of el2_setup split the code that sets up EL2 and the
code that detects the CPU boot mode into separate chunks. This allows
the code that sets up EL2 to run in an endian-independent way, i.e.
before the endianness is set up in the respective sctlr registers.

This patch brings secondary_entry up-to-date so that CPUs entering the
kernel through this code path set up EL2 and the CPU boot mode
properly.

Signed-off-by: Lorenzo Pieralisi
Acked-by: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/head.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387..c68cca5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
  * be used where CPUs are brought online dynamically by the kernel.
  */
 ENTRY(secondary_entry)
-	bl	__calc_phys_offset		// x2=phys offset
 	bl	el2_setup			// Drop to EL1
+	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+	bl	set_cpu_boot_mode_flag
 	b	secondary_startup
 ENDPROC(secondary_entry)
--
cgit v1.1

From cdc27c27843248ae7eb0df5fc261dd004eaa5670 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 17 Dec 2013 17:09:08 +0000
Subject: arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events

Commit 8f34a1da35ae ("arm64: ptrace: use HW_BREAKPOINT_EMPTY type for
disabled breakpoints") fixed an issue with GDB trying to zero
breakpoint control registers. The problem there is that the arch
hw_breakpoint code will attempt to create a (disabled) execute
breakpoint of length 0.
This will fail validation and report unexpected failure to GDB. To
avoid this, we treated disabled breakpoints as HW_BREAKPOINT_EMPTY, but
that seems to have broken with recent kernels, causing watchpoints to
be treated as TYPE_INST in the core code and returning ENOSPC for any
further breakpoints.

This patch fixes the problem by prioritising the `enable' field of the
breakpoint: if it is cleared, we simply update the perf_event_attr to
indicate that the thing is disabled and don't bother changing either
the type or the length. This reinforces the behaviour that the
breakpoint control register is essentially read-only apart from the
enable bit when disabling a breakpoint.

Cc:
Reported-by: Aaron Liu
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a21..6a8928b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
-
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
+
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
 			return -EINVAL;
-		}
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	attr->bp_len = len;
 	attr->bp_type = type;
-	attr->disabled = disabled;
 
 	return 0;
 }
--
cgit v1.1
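For context, the user-visible operation behind this last fix is a
debugger writing back a zeroed breakpoint control word. The following
is a hedged user-space sketch, not a definitive implementation: it
assumes struct user_hwdebug_state from the arm64 UAPI <asm/ptrace.h>,
NT_ARM_HW_BREAK from <elf.h>, a tracee already stopped under ptrace,
and it elides error handling:

#include <elf.h>		/* NT_ARM_HW_BREAK */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_hwdebug_state (assumed) */

/* Disable hardware breakpoint slot 0 by writing the debug state back
 * with a zeroed control word, i.e. with the enable bit clear. */
static long disable_hw_breakpoint0(pid_t child)
{
	struct user_hwdebug_state dbg;
	struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };

	if (ptrace(PTRACE_GETREGSET, child, NT_ARM_HW_BREAK, &iov))
		return -1;

	dbg.dbg_regs[0].addr = 0;
	dbg.dbg_regs[0].ctrl = 0;	/* enable bit clear */

	return ptrace(PTRACE_SETREGSET, child, NT_ARM_HW_BREAK, &iov);
}

With the patch applied, the zeroed control word simply disables the
underlying perf event; previously it could fail validation or keep
counting against the breakpoint slots as a TYPE_INST event.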