From 0777e3e1723f69276136140209c11deeecb7c6dc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Aug 2014 21:15:19 +0100 Subject: ARM: 8125/1: crypto: enable NEON SHA-1 for big endian This tweaks the SHA-1 NEON code slightly so it works correctly under big endian, and removes the Kconfig condition preventing it from being selected if CONFIG_CPU_BIG_ENDIAN is set. Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King --- arch/arm/crypto/sha1-armv7-neon.S | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S index 50013c0..dcd01f3 100644 --- a/arch/arm/crypto/sha1-armv7-neon.S +++ b/arch/arm/crypto/sha1-armv7-neon.S @@ -9,7 +9,7 @@ */ #include - +#include .syntax unified .code 32 @@ -61,13 +61,13 @@ #define RT3 r12 #define W0 q0 -#define W1 q1 +#define W1 q7 #define W2 q2 #define W3 q3 #define W4 q4 -#define W5 q5 -#define W6 q6 -#define W7 q7 +#define W5 q6 +#define W6 q5 +#define W7 q1 #define tmp0 q8 #define tmp1 q9 @@ -79,6 +79,11 @@ #define qK3 q14 #define qK4 q15 +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ARM_LE(code...) +#else +#define ARM_LE(code...) code +#endif /* Round function macros. */ @@ -150,45 +155,45 @@ #define W_PRECALC_00_15() \ add RWK, sp, #(WK_offs(0)); \ \ - vld1.32 {tmp0, tmp1}, [RDATA]!; \ - vrev32.8 W0, tmp0; /* big => little */ \ - vld1.32 {tmp2, tmp3}, [RDATA]!; \ + vld1.32 {W0, W7}, [RDATA]!; \ + ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ + vld1.32 {W6, W5}, [RDATA]!; \ vadd.u32 tmp0, W0, curK; \ - vrev32.8 W7, tmp1; /* big => little */ \ - vrev32.8 W6, tmp2; /* big => little */ \ + ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ + ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ vadd.u32 tmp1, W7, curK; \ - vrev32.8 W5, tmp3; /* big => little */ \ + ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ vadd.u32 tmp2, W6, curK; \ vst1.32 {tmp0, tmp1}, [RWK]!; \ vadd.u32 tmp3, W5, curK; \ vst1.32 {tmp2, tmp3}, [RWK]; \ #define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {tmp0, tmp1}, [RDATA]!; \ + vld1.32 {W0, W7}, [RDATA]!; \ #define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ add RWK, sp, #(WK_offs(0)); \ #define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W0, tmp0; /* big => little */ \ + ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ #define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {tmp2, tmp3}, [RDATA]!; \ + vld1.32 {W6, W5}, [RDATA]!; \ #define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp0, W0, curK; \ #define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W7, tmp1; /* big => little */ \ + ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ #define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W6, tmp2; /* big => little */ \ + ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ #define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp1, W7, curK; \ #define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W5, tmp3; /* big => little */ \ + ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ #define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp2, W6, curK; \ -- cgit v1.1 From 084bb5bc00c19ec32b45f44d11ba6a0ca2514ec3 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 20 Aug 2014 20:49:54 +0100 Subject: ARM: 
8131/1: arm/smp: Absorb boot_secondary() After becoming a mandatory function, boot_secondary() is no longer used outside arch/arm/kernel/smp.c. Hence remove its public prototype, and, as suggested by Arnd, let it be absorbed by its single caller. Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/smp.h | 6 ------ arch/arm/kernel/smp.c | 12 ++++-------- 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2ec765c..18f5a55 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -49,12 +49,6 @@ extern void smp_init_cpus(void); extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); /* - * Boot a secondary CPU, and assign it the specified idle task. - * This also gives us the initial stack to use for this CPU. - */ -extern int boot_secondary(unsigned int cpu, struct task_struct *); - -/* * Called from platform specific assembly code, this is the * secondary CPU entry point. */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 9388a3d..d19aea4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -95,6 +95,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret; + if (!smp_ops.smp_boot_secondary) + return -ENOSYS; + /* * We need to tell the secondary core where to find * its stack and the page tables. @@ -113,7 +116,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) /* * Now bring the CPU into our world. */ - ret = boot_secondary(cpu, idle); + ret = smp_ops.smp_boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it @@ -142,13 +145,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -int boot_secondary(unsigned int cpu, struct task_struct *idle) -{ - if (smp_ops.smp_boot_secondary) - return smp_ops.smp_boot_secondary(cpu, idle); - return -ENOSYS; -} - int platform_can_cpu_hotplug(void) { #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.1 From 7e66cbc93f876e24c2f8434cf011b94be242005e Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 28 Aug 2014 12:49:36 +0100 Subject: ARM: 8132/1: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET ARM: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET In commit 7fb00c2fca4b6c58be521eb3676cf4b4ba8dde3b ("ARM: 8114/1: LPAE: load upper bits of early TTBR0/TTBR1") the part that fixes the carry when adding TTBR1_OFFSET to TTBR1 was wrong: addls ttbr1, ttbr1, #TTBR1_OFFSET adcls tmp, tmp, #0 addls doesn't update flags, adcls adds carry from cmp above: cmp ttbr1, tmp @ PHYS_OFFSET > PAGE_OFFSET? Condition 'ls' means carry flag is clear or zero flag is set, thus only one case is affected: when PHYS_OFFSET == PAGE_OFFSET. It seems safer to remove this fixup. The bug has been there for ages and nobody has complained. Let's fix it separately.
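For reference, the carry handling being discussed can be written out in C roughly as follows (an illustrative sketch with made-up names, not code from the kernel tree): adding an offset to the low word of a 64-bit TTBR value must propagate any unsigned wrap-around into the high word, which is what an adds/adc pairing would do and what a bare addls cannot.

    #include <stdint.h>

    /* Add an offset to the low word of a 64-bit TTBR and propagate the carry. */
    static void ttbr_add_offset(uint32_t *lo, uint32_t *hi, uint32_t offset)
    {
            uint32_t old_lo = *lo;

            *lo += offset;
            if (*lo < old_lo)       /* unsigned wrap-around == carry out of the low word */
                    *hi += 1;
    }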
Reported-and-Tested-by: Jassi Brar Signed-off-by: Konstantin Khlebnikov Signed-off-by: Russell King --- arch/arm/mm/proc-v7-3level.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 1a24e92..b64e67c 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext) mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits addls \ttbr1, \ttbr1, #TTBR1_OFFSET - adcls \tmp, \tmp, #0 mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits -- cgit v1.1 From a040803a9d6b8c1876d3487a5cb69602ebcbb82c Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 1 Sep 2014 17:14:29 +0100 Subject: ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs Since commit 1dbfa187dad ("ARM: irq migration: force migration off CPU going down") the ARM interrupt migration code on cpu offline calls irqchip.irq_set_affinity() with the argument force=true. At the point of this change the argument had no effect because it was not used by any interrupt chip driver and there was no semantics defined. This changed with commit 01f8fa4f01d8 ("genirq: Allow forcing cpu affinity of interrupts") which made the force argument useful to route interrupts to not yet online cpus without checking the target cpu against the cpu online mask. The following commit ffde1de64012 ("irqchip: gic: Support forced affinity setting") implemented this for the GIC interrupt controller. As a consequence the ARM cpu offline irq migration fails if CPU0 is offlined, because CPU0 is still set in the affinity mask and the validation against the cpu online mask is skipped due to the force argument being true. The following first_cpu(mask) selection always selects CPU0 as the target. Solve the issue by calling irq_set_affinity() with force=false from the CPU offline irq migration code so the GIC driver validates the affinity mask against the CPU online mask and therefore removes CPU0 from the possible target candidates. Tested on TC2 hotplugging CPU0 in and out. Without this patch the system locks up as the IRQs are not migrated away from CPU0. Signed-off-by: Sudeep Holla Acked-by: Thomas Gleixner Acked-by: Mark Rutland Cc: # 3.10.x Signed-off-by: Russell King --- arch/arm/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2c42576..5c4d38e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc) c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; -- cgit v1.1 From 52aaac5ae52ad9a7016410ffeedbaf24b722f3a2 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 8 Sep 2014 14:29:25 -0700 Subject: ARM: OMAP: Fix Kconfig warning for omap1 Commit 21278aeafbfa ("ARM: use menuconfig for sub-arch menus") improved the sub-arch menus, but accidentally caused new warnings for omap1. This was because the commit added a menu entry around the config ARCH_OMAP bool entry where the menu had depends on ARCH_MULTI_V6 || ARCH_MULTI_V7.
As ARCH_OMAP is shared between omap1 and omap2plus, let's fix the issue by defining ARCH_OMAP in the shared plat-omap/Kconfig. Fixes: 21278aeafbfa ("ARM: use menuconfig for sub-arch menus") Reported-by: Andreas Ruprecht Signed-off-by: Tony Lindgren Signed-off-by: Arnd Bergmann --- arch/arm/mach-omap2/Kconfig | 3 --- arch/arm/plat-omap/Kconfig | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index e7189dc..08d4167 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -1,9 +1,6 @@ menu "TI OMAP/AM/DM/DRA Family" depends on ARCH_MULTI_V6 || ARCH_MULTI_V7 -config ARCH_OMAP - bool - config ARCH_OMAP2 bool "TI OMAP2" depends on ARCH_MULTI_V6 diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 02fc10d..d055db3 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -1,3 +1,6 @@ +config ARCH_OMAP + bool + if ARCH_OMAP menu "TI OMAP Common Features" -- cgit v1.1 From 3d3c6a5f3de2c76b2d6bf96be56dd6fcc2bb2a30 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 26 Jul 2014 20:49:39 +0200 Subject: ARM: pxa: fix section mismatch warning for pxa_timer_nodt_init commit a38b1f60b5245a3 ("ARM: pxa: Add non device-tree timer link to clocksource") introduced a harmless section mismatch warning for all pxa platforms, by introducing a new pxa_timer_init() function that is not marked __init but that calls pxa_timer_nodt_init(), which is. Signed-off-by: Arnd Bergmann Acked-by: Robert Jarzmik --- arch/arm/mach-pxa/generic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 630fa91..04b013f 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate); /* * For non device-tree builds, keep legacy timer init */ -void pxa_timer_init(void) +void __init pxa_timer_init(void) { pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000), get_clock_tick_rate()); -- cgit v1.1 From 5990047cef0c6a36eefcb166bd32d99a8f95c75b Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 10 Sep 2014 08:57:11 -0700 Subject: ARM: dts: dra7-evm: Fix NAND GPMC timings The nand timings were scaled down by 2 to account for the 2x rate returned by clk_get_rate(gpmc_fclk). As the clock data got fixed by [1], revert back to actual timings (i.e. scale them up by 2). Without this NAND doesn't work on dra7-evm. 
[1] - commit dd94324b983afe114ba9e7ee3649313b451f63ce ARM: dts: dra7xx-clocks: Fix the l3 and l4 clock rates Fixes: ff66a3c86e00 ("ARM: dts: dra7: add support for parallel NAND flash") Cc: [3.16] Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/dra7-evm.dts | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index e03fbf3..b40cdad 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -447,22 +447,19 @@ gpmc,device-width = <2>; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <40>; - gpmc,cs-wr-off-ns = <40>; + gpmc,cs-rd-off-ns = <80>; + gpmc,cs-wr-off-ns = <80>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <30>; - gpmc,adv-wr-off-ns = <30>; - gpmc,we-on-ns = <5>; - gpmc,we-off-ns = <25>; - gpmc,oe-on-ns = <2>; - gpmc,oe-off-ns = <20>; - gpmc,access-ns = <20>; - gpmc,wr-access-ns = <40>; - gpmc,rd-cycle-ns = <40>; - gpmc,wr-cycle-ns = <40>; - gpmc,wait-pin = <0>; - gpmc,wait-on-read; - gpmc,wait-on-write; + gpmc,adv-rd-off-ns = <60>; + gpmc,adv-wr-off-ns = <60>; + gpmc,we-on-ns = <10>; + gpmc,we-off-ns = <50>; + gpmc,oe-on-ns = <4>; + gpmc,oe-off-ns = <40>; + gpmc,access-ns = <40>; + gpmc,wr-access-ns = <80>; + gpmc,rd-cycle-ns = <80>; + gpmc,wr-cycle-ns = <80>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,clk-activation-ns = <0>; -- cgit v1.1 From e847faf44929112c99b3759a533a727ebd71945b Mon Sep 17 00:00:00 2001 From: Dmitry Lifshitz Date: Wed, 10 Sep 2014 08:57:19 -0700 Subject: ARM: dts: cm-t54: fix serial console power supply. LDO8 regulator is used for act led and serial console power supply. Its DT status is declared as "disabled", however the serial console was functional until Commit 318dbb02b ("regulator: palmas: Fix SMPS enable/disable/is_enabled") which properly turns off LDO8 on boot. Fix serial console power supply (and act led) on boot by turning LDO8 on. Fixes: 318dbb02b ("regulator: palmas: Fix SMPS enable/disable/is_enabled") Signed-off-by: Dmitry Lifshitz Tested-by: Paul Walmsley Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/omap5-cm-t54.dts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index b8698ca..429471a 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -353,13 +353,12 @@ }; ldo8_reg: ldo8 { - /* VDD_3v0: Does not go anywhere */ + /* VDD_3V_GP: act led/serial console */ regulator-name = "ldo8"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; + regulator-always-on; regulator-boot-on; - /* Unused */ - status = "disabled"; }; ldo9_reg: ldo9 { -- cgit v1.1 From 1b134c9c4b555342be667f144ee714af1c3f6a9f Mon Sep 17 00:00:00 2001 From: Markus Niebel Date: Thu, 11 Sep 2014 15:56:56 +0800 Subject: ARM: DT: imx53: fix lvds channel 1 port using LVDS channel 1 on an i.MX53 leads to the following error: imx-ldb 53fa8008.ldb: unable to set di0 parent clock to ldb_di1 This comes from imx_ldb_set_clock with mux = 0. Mux parameter must be "1" for reparenting di1 clock to ldb_di1. The value of the mux param comes from device tree port settings. On i.MX5, the internal two-input-multiplexer is used.
Due to hardware limitations, only one port (port@[0,1]) can be used for each channel (lvds-channel@[0,1], respectively) Documentation update suggested by Philipp Zabel Signed-off-by: Markus Niebel Fixes: e05c8c9a790a ("ARM: dts: imx53: Add IPU DI ports and endpoints, move imx-drm node to dtsi") Cc: Acked-by: Philipp Zabel Signed-off-by: Shawn Guo Signed-off-by: Arnd Bergmann --- arch/arm/boot/dts/imx53.dtsi | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c6c58c1..6b675a0 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -423,10 +423,14 @@ status = "disabled"; lvds-channel@0 { + #address-cells = <1>; + #size-cells = <0>; reg = <0>; status = "disabled"; - port { + port@0 { + reg = <0>; + lvds0_in: endpoint { remote-endpoint = <&ipu_di0_lvds0>; }; @@ -434,10 +438,14 @@ }; lvds-channel@1 { + #address-cells = <1>; + #size-cells = <0>; reg = <1>; status = "disabled"; - port { + port@1 { + reg = <1>; + lvds1_in: endpoint { remote-endpoint = <&ipu_di1_lvds1>; }; -- cgit v1.1 From 99897500e54560397054aa238c771fa8284d0eb2 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Thu, 11 Sep 2014 17:41:25 -0400 Subject: ARM: keystone: dts: fix bindings for pcie and usb clock nodes Fix incorrect clock names for usb1, pcie1 and domain register offset for pcie1 clock nodes on K2E EVM Signed-off-by: Murali Karicheri Signed-off-by: Santosh Shilimkar --- arch/arm/boot/dts/k2e-clocks.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 598afe9..4773d6a 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -40,7 +40,7 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,psc-clock"; clocks = <&chipclk16>; - clock-output-names = "usb"; + clock-output-names = "usb1"; reg = <0x02350004 0xb00>, <0x02350000 0x400>; reg-names = "control", "domain"; domain-id = <0>; @@ -60,8 +60,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,psc-clock"; clocks = <&chipclk12>; - clock-output-names = "pcie"; - reg = <0x0235006c 0xb00>, <0x02350000 0x400>; + clock-output-names = "pcie1"; + reg = <0x0235006c 0xb00>, <0x02350048 0x400>; reg-names = "control", "domain"; domain-id = <18>; }; -- cgit v1.1 From e918a62a2ba81d10a3cc2c513dc70034c9524a95 Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Mon, 1 Sep 2014 17:16:01 +0100 Subject: ARM: 8135/1: Fix in-correct barrier usage in SWP{B} emulation According to the ARM ARMv7, explicit barriers are necessary when using synchronisation primitives such as SWP{B}. The use of these instructions does not automatically imply a barrier and any ordering requirements by the software must be explicitly expressed with the use of suitable barriers. Based on this, remove the barriers from SWP{B} emulation. Acked-by: Will Deacon Signed-off-by: Punit Agrawal Signed-off-by: Russell King --- arch/arm/kernel/swp_emulate.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 67ca857..587fdfe 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, while (1) { unsigned long temp; - /* - * Barrier required between accessing protected resource and - * releasing a lock for it. 
Legacy code might not have done - * this, and we cannot determine that this is not the case - * being emulated, so insert always. - */ - smp_mb(); - if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else @@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, } if (res == 0) { - /* - * Barrier also required between acquiring a lock for a - * protected resource and accessing the resource. Inserted for - * same reason as above. - */ - smp_mb(); - if (type == TYPE_SWPB) swpbcounter++; else -- cgit v1.1 From d9981380b49b839ecaffbbe131908a342db68980 Mon Sep 17 00:00:00 2001 From: Victor Kamensky Date: Thu, 4 Sep 2014 06:07:33 +0100 Subject: ARM: 8137/1: fix get_user BE behavior for target variable with size of 8 bytes e38361d 'ARM: 8091/2: add get_user() support for 8 byte types' commit broke V7 BE get_user call when target var size is 64 bit, but '*ptr' size is 32 bit or smaller. e38361d changed type of __r2 from 'register unsigned long' to 'register typeof(x) __r2 asm("r2")' i.e. before the change even when target variable size was 64 bit, __r2 was still 32 bit. But after e38361d commit, for target var of 64 bit size, __r2 became 64 bit and now it should occupy 2 registers r2, and r3. The issue in the BE case is that the r3 register is the least significant word of __r2 and the r2 register is the most significant word of __r2. But __get_user_4 still copies result into r2 (most significant word of __r2). Subsequent code copies from __r2 into x, but for situation described it will pick up only garbage from r3 register. Special __get_user_64t_(124) functions are introduced. They are similar to the corresponding __get_user_(124) functions but the result is stored in the r3 register (lsw in case of 64 bit __r2 in BE image). Those functions are used by the get_user macro in the case of BE and a target var size of 64 bit. Also changed __get_user_lo8 name into __get_user_32t_8 to get consistent naming across all cases.
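The word-placement issue can be seen with a small endianness-agnostic C sketch (illustrative only, not kernel code): on a 32-bit big-endian target the first word of a 64-bit object is its most significant half, so writing the fetched 32-bit value there (the r2 analogue) corrupts the result, while a proper narrowing assignment lands it in the least significant word.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            union {
                    uint64_t v64;
                    uint32_t w[2];          /* w[0] is the most significant word on big endian */
            } dst = { 0 };
            uint32_t fetched = 0x12345678;  /* the 32-bit value read from user space */

            dst.w[0] = fetched;             /* what writing only r2 amounts to */
            printf("first-word store: %#llx\n", (unsigned long long)dst.v64);

            dst.v64 = fetched;              /* what the __get_user_64t_* helpers achieve */
            printf("proper narrowing : %#llx\n", (unsigned long long)dst.v64);
            return 0;
    }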
Signed-off-by: Victor Kamensky Suggested-by: Daniel Thompson Reviewed-by: Daniel Thompson Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 48 ++++++++++++++++++++++++++++++++++-------- arch/arm/lib/getuser.S | 38 +++++++++++++++++++++++++++++++-- 2 files changed, 75 insertions(+), 11 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index a4cd7af..4767eb9 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs) extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -extern int __get_user_lo8(void *); +extern int __get_user_32t_8(void *); extern int __get_user_8(void *); +extern int __get_user_64t_1(void *); +extern int __get_user_64t_2(void *); +extern int __get_user_64t_4(void *); #define __GUP_CLOBBER_1 "lr", "cc" #ifdef CONFIG_CPU_USE_DOMAINS @@ -117,7 +120,7 @@ extern int __get_user_8(void *); #define __GUP_CLOBBER_2 "lr", "cc" #endif #define __GUP_CLOBBER_4 "lr", "cc" -#define __GUP_CLOBBER_lo8 "lr", "cc" +#define __GUP_CLOBBER_32t_8 "lr", "cc" #define __GUP_CLOBBER_8 "lr", "cc" #define __get_user_x(__r2,__p,__e,__l,__s) \ @@ -131,12 +134,30 @@ extern int __get_user_8(void *); /* narrowing a double-word get into a single 32bit word register: */ #ifdef __ARMEB__ -#define __get_user_xb(__r2, __p, __e, __l, __s) \ - __get_user_x(__r2, __p, __e, __l, lo8) +#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ + __get_user_x(__r2, __p, __e, __l, 32t_8) #else -#define __get_user_xb __get_user_x +#define __get_user_x_32t __get_user_x #endif +/* + * storing result into proper least significant word of 64bit target var, + * different only for big endian case where 64 bit __r2 lsw is r3: + */ +#ifdef __ARMEB__ +#define __get_user_x_64t(__r2, __p, __e, __l, __s) \ + __asm__ __volatile__ ( \ + __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ + "bl __get_user_64t_" #__s \ + : "=&r" (__e), "=r" (__r2) \ + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) +#else +#define __get_user_x_64t __get_user_x +#endif + + #define __get_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ @@ -146,17 +167,26 @@ extern int __get_user_8(void *); register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, __l, 1); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 1); \ + else \ + __get_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __get_user_x(__r2, __p, __e, __l, 2); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 2); \ + else \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, __l, 4); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 4); \ + else \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ if (sizeof((x)) < 8) \ - __get_user_xb(__r2, __p, __e, __l, 4); \ + __get_user_x_32t(__r2, __p, __e, __l, 4); \ else \ __get_user_x(__r2, __p, __e, __l, 8); \ break; \ diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 9386000..8ecfd15 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -80,7 +80,7 @@ ENTRY(__get_user_8) ENDPROC(__get_user_8) #ifdef __ARMEB__ -ENTRY(__get_user_lo8) +ENTRY(__get_user_32t_8) check_uaccess r0, 8, r1, r2, __get_user_bad #ifdef CONFIG_CPU_USE_DOMAINS add r0, r0, #4 @@ -90,7 +90,37 @@ ENTRY(__get_user_lo8) #endif mov r0, #0 ret lr 
-ENDPROC(__get_user_lo8) +ENDPROC(__get_user_32t_8) + +ENTRY(__get_user_64t_1) + check_uaccess r0, 1, r1, r2, __get_user_bad8 +8: TUSER(ldrb) r3, [r0] + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_1) + +ENTRY(__get_user_64t_2) + check_uaccess r0, 2, r1, r2, __get_user_bad8 +#ifdef CONFIG_CPU_USE_DOMAINS +rb .req ip +9: ldrbt r3, [r0], #1 +10: ldrbt rb, [r0], #0 +#else +rb .req r0 +9: ldrb r3, [r0] +10: ldrb rb, [r0, #1] +#endif + orr r3, rb, r3, lsl #8 + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_2) + +ENTRY(__get_user_64t_4) + check_uaccess r0, 4, r1, r2, __get_user_bad8 +11: TUSER(ldr) r3, [r0] + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_4) #endif __get_user_bad8: @@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8) .long 6b, __get_user_bad8 #ifdef __ARMEB__ .long 7b, __get_user_bad + .long 8b, __get_user_bad8 + .long 9b, __get_user_bad8 + .long 10b, __get_user_bad8 + .long 11b, __get_user_bad8 #endif .popsection -- cgit v1.1 From abf3878047a92716704942d2a00917927004798b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Sep 2014 13:21:44 +0100 Subject: ARM: 8136/1: sa1100: add Micro ASIC platform device This adds the Atmel Micro ASIC platform device and selects it by default for h3100 and h3600. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-sa1100/Kconfig | 2 ++ arch/arm/mach-sa1100/h3xxx.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index 04f9784..c6f6ed1 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -58,6 +58,7 @@ config SA1100_H3100 bool "Compaq iPAQ H3100" select ARM_SA1110_CPUFREQ select HTC_EGPIO + select MFD_IPAQ_MICRO help Say Y here if you intend to run this kernel on the Compaq iPAQ H3100 handheld computer. Information about this machine and the @@ -69,6 +70,7 @@ config SA1100_H3600 bool "Compaq iPAQ H3600/H3700" select ARM_SA1110_CPUFREQ select HTC_EGPIO + select MFD_IPAQ_MICRO help Say Y here if you intend to run this kernel on the Compaq iPAQ H3600 handheld computer. Information about this machine and the diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c index c79bf46..b1d4faa 100644 --- a/arch/arm/mach-sa1100/h3xxx.c +++ b/arch/arm/mach-sa1100/h3xxx.c @@ -25,6 +25,7 @@ #include #include +#include #include "generic.h" @@ -244,9 +245,23 @@ static struct platform_device h3xxx_keys = { }, }; +static struct resource h3xxx_micro_resources[] = { + DEFINE_RES_MEM(0x80010000, SZ_4K), + DEFINE_RES_MEM(0x80020000, SZ_4K), + DEFINE_RES_IRQ(IRQ_Ser1UART), +}; + +struct platform_device h3xxx_micro_asic = { + .name = "ipaq-h3xxx-micro", + .id = -1, + .resource = h3xxx_micro_resources, + .num_resources = ARRAY_SIZE(h3xxx_micro_resources), +}; + static struct platform_device *h3xxx_devices[] = { &h3xxx_egpio, &h3xxx_keys, + &h3xxx_micro_asic, }; void __init h3xxx_mach_init(void) -- cgit v1.1 From fbf10641487d0c6938e580d143e4519e7ea90acf Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 5 Sep 2014 23:00:33 +0100 Subject: ARM: 8138/1: drop ISAR0 workaround for B15 The Brahma-B15's ISAR0 correctly advertises UDIV/SDIV support in both ARM and Thumb2 modes (CPUID_EXT_ISAR0=02101110), so we don't need to manually apply this hwcap.
The code in question actually predates the following commit, which made our hwcaps unnecessary: commit 8164f7af88d9ad3a757bd14f634b23997ee77f6b Author: Stephen Boyd Date: Mon Mar 18 19:44:15 2013 +0100 ARM: 7680/1: Detect support for SDIV/UDIV from ISAR0 register Signed-off-by: Brian Norris Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b5d67db..b3a9478 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -570,7 +570,7 @@ __v7_ca15mp_proc_info: __v7_b15mp_proc_info: .long 0x420f00f0 .long 0xff0ffff0 - __v7_proc __v7_b15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_b15mp_setup .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info /* -- cgit v1.1 From 7a0bd49713aca3040099e1413d1cc9f08802d97a Mon Sep 17 00:00:00 2001 From: Victor Kamensky Date: Sat, 13 Sep 2014 23:29:17 +0100 Subject: ARM: 8151/1: add missing exports for asm functions required by get_user macro Previous commits that dealt with get_user for 64bit type missed to export proper functions, so if get_user macro with particular target/value types are used by kernel module modpost would produce 'undefined!' error. Solution is to export all required functions. Signed-off-by: Victor Kamensky Signed-off-by: Russell King --- arch/arm/kernel/armksyms.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f7b450f..a88671c 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); +EXPORT_SYMBOL(__get_user_8); + +#ifdef __ARMEB__ +EXPORT_SYMBOL(__get_user_64t_1); +EXPORT_SYMBOL(__get_user_64t_2); +EXPORT_SYMBOL(__get_user_64t_4); +EXPORT_SYMBOL(__get_user_32t_8); +#endif EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); -- cgit v1.1 From fbfb872f5f417cea48760c535e0ff027c88b507a Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 11 Sep 2014 02:49:08 +0100 Subject: ARM: 8148/1: flush TLS and thumbee register state during exec The TPIDRURO and TPIDRURW registers need to be flushed during exec; otherwise TLS information is potentially leaked. TPIDRURO in particular needs careful treatment. Since flush_thread basically needs the same code used to set the TLS in arm_syscall, pull that into a common set_tls helper in tls.h and use it in both places. Similarly, TEEHBR needs to be cleared during exec as well. Clearing its save slot in thread_info isn't right as there is no guarantee that a thread switch will occur before the new program runs. Just setting the register directly is sufficient. 
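A user-space sketch of the leak being closed (assumes an ARMv7 CPU and a compiler targeting ARM; purely illustrative): TPIDRURO is readable at PL0, so without the flush a freshly exec'd program could observe the previous program's TLS pointer.

    #include <stdio.h>

    int main(void)
    {
            unsigned long tp;

            /* Read TPIDRURO, the user read-only thread ID / TLS register. */
            asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
            printf("TPIDRURO right after exec: %#lx\n", tp);
            return 0;
    }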
Signed-off-by: Nathan Lynch Acked-by: Will Deacon Cc: Signed-off-by: Russell King --- arch/arm/include/asm/tls.h | 62 ++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/process.c | 2 ++ arch/arm/kernel/thumbee.c | 2 +- arch/arm/kernel/traps.c | 17 +------------ 4 files changed, 66 insertions(+), 17 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 83259b8..36172ad 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -1,6 +1,9 @@ #ifndef __ASMARM_TLS_H #define __ASMARM_TLS_H +#include +#include + #ifdef __ASSEMBLY__ #include .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 @@ -50,6 +53,47 @@ #endif #ifndef __ASSEMBLY__ + +static inline void set_tls(unsigned long val) +{ + struct thread_info *thread; + + thread = current_thread_info(); + + thread->tp_value[0] = val; + + /* + * This code runs with preemption enabled and therefore must + * be reentrant with respect to switch_tls. + * + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. + * + * If we're preempted here, switch_tls will load TPIDRURO from + * thread_info upon resuming execution and the following mcr + * is merely redundant. + */ + barrier(); + + if (!tls_emu) { + if (has_tls_reg) { + asm("mcr p15, 0, %0, c13, c0, 3" + : : "r" (val)); + } else { + /* + * User space must never try to access this + * directly. Expect your app to break + * eventually if you do so. The user helper + * at 0xffff0fe0 must be used instead. (see + * entry-armv.S for details) + */ + *((unsigned int *)0xffff0ff0) = val; + } + + } +} + static inline unsigned long get_tpuser(void) { unsigned long reg = 0; @@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void) return reg; } + +static inline void set_tpuser(unsigned long val) +{ + /* Since TPIDRURW is fully context-switched (unlike TPIDRURO), + * we need not update thread_info. 
+ */ + if (has_tls_reg && !tls_emu) { + asm("mcr p15, 0, %0, c13, c0, 2" + : : "r" (val)); + } +} + +static inline void flush_tls(void) +{ + set_tls(0); + set_tpuser(0); +} + #endif #endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 81ef686..a35f6eb 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -334,6 +334,8 @@ void flush_thread(void) memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); + flush_tls(); + thread_notify(THREAD_NOTIFY_FLUSH, thread); } diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c index 7b8403b..80f0d69 100644 --- a/arch/arm/kernel/thumbee.c +++ b/arch/arm/kernel/thumbee.c @@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void switch (cmd) { case THREAD_NOTIFY_FLUSH: - thread->thumbee_state = 0; + teehbr_write(0); break; case THREAD_NOTIFY_SWITCH: current_thread_info()->thumbee_state = teehbr_read(); diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c8e4bb7..a964c9f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags) #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); siginfo_t info; if ((no >> 16) != (__ARM_NR_BASE>> 16)) @@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return regs->ARM_r0; case NR(set_tls): - thread->tp_value[0] = regs->ARM_r0; - if (tls_emu) - return 0; - if (has_tls_reg) { - asm ("mcr p15, 0, %0, c13, c0, 3" - : : "r" (regs->ARM_r0)); - } else { - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. - * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; - } + set_tls(regs->ARM_r0); return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG -- cgit v1.1 From 505013bc9065391f09a51d51cd3bf0b06dfb570a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 11 Sep 2014 23:25:30 +0100 Subject: ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts Rob Clark reports a sleeping while atomic bug when using perf. 
BUG: sleeping function called from invalid context at ../kernel/locking/mutex.c:583 in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/0 ------------[ cut here ]------------ WARNING: CPU: 2 PID: 4828 at ../kernel/locking/mutex.c:479 mutex_lock_nested+0x3a0/0x3e8() DEBUG_LOCKS_WARN_ON(in_interrupt()) Modules linked in: CPU: 2 PID: 4828 Comm: Xorg.bin Tainted: G W 3.17.0-rc3-00234-gd535c45-dirty #819 [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0x98/0xb8) [] (dump_stack) from [] (warn_slowpath_common+0x70/0x8c) [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40) [] (warn_slowpath_fmt) from [] (mutex_lock_nested+0x3a0/0x3e8) [] (mutex_lock_nested) from [] (irq_find_host+0x20/0x9c) [] (irq_find_host) from [] (of_irq_get+0x28/0x48) [] (of_irq_get) from [] (platform_get_irq+0x1c/0x8c) [] (platform_get_irq) from [] (cpu_pmu_enable_percpu_irq+0x14/0x38) [] (cpu_pmu_enable_percpu_irq) from [] (flush_smp_call_function_queue+0x88/0x178) [] (flush_smp_call_function_queue) from [] (handle_IPI+0x88/0x160) [] (handle_IPI) from [] (gic_handle_irq+0x64/0x68) [] (gic_handle_irq) from [] (__irq_svc+0x44/0x5c) Exception stack(0xe63ddea0 to 0xe63ddee8) dea0: 00000001 00000001 00000000 c2f3b200 c16db380 c032d4a0 e63ddf40 60010013 dec0: 00000000 001fbfd4 00000100 00000000 00000001 e63ddee8 c0284770 c02a2e30 dee0: 20010013 ffffffff [] (__irq_svc) from [] (ktime_get_ts64+0x1c8/0x200) [] (ktime_get_ts64) from [] (poll_select_set_timeout+0x60/0xa8) [] (poll_select_set_timeout) from [] (SyS_select+0xa8/0x118) [] (SyS_select) from [] (ret_fast_syscall+0x0/0x48) ---[ end trace 0bb583b46342da6f ]--- INFO: lockdep is turned off. We don't really need to get the platform irq again when we're enabling or disabling the per-cpu irq. Furthermore, we don't really need to set and clear bits in the active_irqs bitmask because that's only used in the non-percpu irq case to figure out when the last CPU PMU has been disabled. Just pass the irq directly to the enable/disable functions to clean all this up. This should be slightly more efficient and also fix the scheduling while atomic bug. 
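The resulting calling pattern looks roughly like this (a simplified sketch, not the exact driver code): the IRQ number is resolved once in process context, and the cross-call handlers receive only plain data, so nothing that can sleep runs in atomic context.

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/platform_device.h>
    #include <linux/smp.h>

    static void pmu_enable_percpu_irq(void *data)
    {
            int irq = *(int *)data;         /* no lookups here: runs in IPI/atomic context */

            enable_percpu_irq(irq, IRQ_TYPE_NONE);
    }

    static int pmu_enable_irqs(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);    /* may sleep: fine in process context */

            if (irq < 0)
                    return irq;

            on_each_cpu(pmu_enable_percpu_irq, &irq, 1);
            return 0;
    }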
Fixes: bbd64559376f "ARM: perf: support percpu irqs for the CPU PMU" Reported-by: Rob Clark Acked-by: Will Deacon Cc: stable@vger.kernel.org Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/perf_event_cpu.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index e6a6edb..4bf4cce 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) static void cpu_pmu_enable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; enable_percpu_irq(irq, IRQ_TYPE_NONE); - cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs); } static void cpu_pmu_disable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; - cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs); disable_percpu_irq(irq); } @@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) irq = platform_get_irq(pmu_device, 0); if (irq >= 0 && irq_is_percpu(irq)) { - on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &percpu_pmu); } else { for (i = 0; i < irqs; ++i) { @@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) irq); return err; } - on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { err = 0; -- cgit v1.1 From be26e0e0ef167f4aa1047ef14e8059a44262921a Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Mon, 8 Sep 2014 16:47:47 +0100 Subject: ARM: 8139/1: versatile: Enable DEBUG_LL_UART_PL01X This defconfig already enables DEBUG_LL and by default DEBUG_LL_UART_NONE will be selected (but due to some back compability magic I'd like to remove is not actually honoured). DEBUG_LL_UART_PL01X is a much saner default. Signed-off-by: Daniel Thompson Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/configs/versatile_defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index d52b4ff..ea49d37 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -82,5 +82,6 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_USER=y CONFIG_DEBUG_LL=y +CONFIG_DEBUG_LL_UART_PL01X=y CONFIG_FONTS=y CONFIG_FONT_ACORN_8x8=y -- cgit v1.1 From 9f9ec08cf9cb8e8411b1d400d5c8d99d46c5c22b Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Mon, 8 Sep 2014 16:48:25 +0100 Subject: ARM: 8140/1: ep93xx: Enable DEBUG_LL_UART_PL01X This defconfig already enables DEBUG_LL and by default DEBUG_LL_UART_NONE will be selected (but due to some back compability magic I'd like to remove is not actually honoured). DEBUG_LL_UART_PL01X is a much saner default. 
Signed-off-by: Daniel Thompson Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/configs/ep93xx_defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 1b650c8..72233b9 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig @@ -107,5 +107,6 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_USER=y CONFIG_DEBUG_LL=y +CONFIG_DEBUG_LL_UART_PL01X=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_LIBCRC32C=y -- cgit v1.1 From 7db143b89137de06ed289cf8b302f3bbbc5baa1f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 16 Sep 2014 15:09:44 -0700 Subject: ARM: OMAP3: Fix I/O chain clock line assertion timed out error We are getting "PRM: I/O chain clock line assertion timed out" errors on early omaps for device tree based booting. This is because we are unconditionally calling reconfigure_io_chain while legacy booting has omap3_has_io_chain_ctrl() checks in place in omap_hwmod.c. For device tree based booting, we are calling reconfigure_io_chain unconditionally from the pinctrl framework. So we need to add a check for omap3_has_io_chain_ctrl() to avoid the errors for trying to access a register that does not exist. For es3.0, the documentation in "4.11.2 Device Off-Mode Configuration" just mentions PM_WKEN_WKUP[8] bit. For es3.1, there's a new chapter in documentation for "4.11.2.2 I/O Wake-Up Mechanism" that describes the PM_WKEN_WKUP[16] ST_IO_CHAIN bit. So PM_WKEN_WKUP[16] bit did not get added until es3.1, probably to fix issues with flaky wake-up events. We are doing proper checks for ST_IO_CHAIN already in id.c and with omap3_has_io_chain_ctrl(). For more information, see also commit b02b917211d5 ("ARM: OMAP3: PM: fix I/O wakeup and I/O chain clock control detection"). Let's fix the issue by selecting the right function during init for reconfigure_io_chain depending on the omap revision. For es3.0 and earlier we need to just toggle EN_IO. By doing this, we can move the check for omap3_has_io_chain_ctrl() from omap_hwmod.c to the init code in prm_3xxx.c. And then we can unconditionally call reconfigure_io_chain. Thanks to Paul Walmsley and Nishanth Menon for help with debugging the issue.
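In outline, the init-time selection amounts to the following (condensed from the diff below):

    if (omap3_has_io_chain_ctrl())      /* 3430 es3.1 and later: ST_IO_CHAIN exists */
            omap3_prcm_irq_setup.reconfigure_io_chain =
                    omap3_prm_reconfigure_io_chain;
    else                                /* earlier revisions: just toggle EN_IO */
            omap3_prcm_irq_setup.reconfigure_io_chain =
                    omap3430_pre_es3_1_reconfigure_io_chain;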
Fixes: 30a69ef785e8 ("ARM: OMAP: Move DT wake-up event handling over to use pinctrl-single-omap") Cc: Kevin Hilman Cc: Paul Walmsley Cc: Tero Kristo Reviewed-by: Nishanth Menon Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod.c | 2 +- arch/arm/mach-omap2/prm3xxx.c | 39 +++++++++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 8fd87a3..9e91a4e 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void) spin_lock_irqsave(&io_chain_lock, flags); - if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl()) + if (cpu_is_omap34xx()) omap3xxx_prm_reconfigure_io_chain(); else if (cpu_is_omap44xx()) omap44xx_prm_reconfigure_io_chain(); diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 2458be6..372de3e 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { .ocp_barrier = &omap3xxx_prm_ocp_barrier, .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, .restore_irqen = &omap3xxx_prm_restore_irqen, - .reconfigure_io_chain = &omap3xxx_prm_reconfigure_io_chain, + .reconfigure_io_chain = NULL, }; /* @@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) } /** - * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain + * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain + * + * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only + * thing we can do is toggle EN_IO bit for earlier omaps. + */ +void omap3430_pre_es3_1_reconfigure_io_chain(void) +{ + omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, + PM_WKEN); + omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, + PM_WKEN); + omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); +} + +/** + * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No - * return value. + * return value. These registers are only available in 3430 es3.1 and later. 
*/ -void omap3xxx_prm_reconfigure_io_chain(void) +void omap3_prm_reconfigure_io_chain(void) { int i = 0; @@ -400,6 +415,15 @@ void omap3xxx_prm_reconfigure_io_chain(void) } /** + * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain + */ +void omap3xxx_prm_reconfigure_io_chain(void) +{ + if (omap3_prcm_irq_setup.reconfigure_io_chain) + omap3_prcm_irq_setup.reconfigure_io_chain(); +} + +/** * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by @@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void) if (!(prm_features & PRM_HAS_IO_WAKEUP)) return 0; + if (omap3_has_io_chain_ctrl()) + omap3_prcm_irq_setup.reconfigure_io_chain = + omap3_prm_reconfigure_io_chain; + else + omap3_prcm_irq_setup.reconfigure_io_chain = + omap3430_pre_es3_1_reconfigure_io_chain; + omap3xxx_prm_enable_io_wakeup(); ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); if (!ret) -- cgit v1.1 From 9e1ac462b982f496ed3b491f02c417dcc8e40347 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 16 Sep 2014 09:35:33 +0800 Subject: ARM: imx: fix .is_enabled() of shared gate clock Commit 63288b721a80 ("ARM: imx: fix shared gate clock") attempted to fix an issue with particular enable/disable sequence from two shared gate clocks. But unfortunately, while it partially fixed the issue, it also did something wrong in .is_enabled() function hook. In case of shared gate, the function shouldn't really query the hardware state via share_count, because the function is trying to query the enabling state of the clock in question, not the hardware state which is shared by multiple clocks. Fix the issue by returning the enable_count of the clock itself which is maintained by clock core, in case it's a clock sharing hardware gate with others. As the result, the initialization of share_count per hardware state is not needed now. So remove it. Reported-by: Fabio Estevam Fixes: 63288b721a80 ("ARM: imx: fix shared gate clock") Cc: Signed-off-by: Shawn Guo Tested-by: Fabio Estevam Signed-off-by: Olof Johansson --- arch/arm/mach-imx/clk-gate2.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c index 84acdfd..5a75cdc 100644 --- a/arch/arm/mach-imx/clk-gate2.c +++ b/arch/arm/mach-imx/clk-gate2.c @@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw) struct clk_gate2 *gate = to_clk_gate2(hw); if (gate->share_count) - return !!(*gate->share_count); + return !!__clk_get_enable_count(hw->clk); else return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx); } @@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, gate->bit_idx = bit_idx; gate->flags = clk_gate2_flags; gate->lock = lock; - - /* Initialize share_count per hardware state */ - if (share_count) - *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0; gate->share_count = share_count; init.name = name; -- cgit v1.1 From 2c553ac19e73235b61b67fdc5c14be9913a2758d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 25 Sep 2014 11:39:19 +0100 Subject: ARM: 8164/1: mm: clear SCTLR.HA instead of setting it for LPAE SCTLR.HA (hardware access flag) is deprecated and not actually implemented by any CPUs. Furthermore, it can confuse cr_alignment checks where the whole value of SCTLR is compared against the value sitting in the hardware, since the bit is actually RAZ/WI and will not match the saved cr_alignment value. 
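A small self-contained model of why a RAZ/WI bit breaks such an equality check (illustrative C only; the register is modelled by a variable, and SCTLR.HA is bit 17):

    #include <assert.h>
    #include <stdint.h>

    #define SCTLR_HA (1u << 17)     /* hardware access flag: RAZ/WI on these CPUs */

    static uint32_t hw_sctlr;                                            /* stand-in for the register */
    static uint32_t read_sctlr(void)    { return hw_sctlr & ~SCTLR_HA; } /* reads as zero  */
    static void write_sctlr(uint32_t v) { hw_sctlr = v & ~SCTLR_HA; }    /* writes ignored */

    int main(void)
    {
            uint32_t cached = 0x30c03c7d | SCTLR_HA;  /* software copy with HA set */

            write_sctlr(cached);
            assert(read_sctlr() != cached);           /* spurious mismatch, every time */
            return 0;
    }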
Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/mm/proc-v7-3level.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index b64e67c..d3daed0 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -157,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext) * TFR EV X F IHD LR S * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced - * 11 0 110 1 0011 1100 .111 1101 < we want + * 11 0 110 0 0011 1100 .111 1101 < we want */ .align 2 .type v7_crval, #object v7_crval: - crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c + crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c -- cgit v1.1 From 5ca918e5e3f9df4634077c06585c42bc6a8d699a Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 25 Sep 2014 11:56:19 +0100 Subject: ARM: 8165/1: alignment: don't break misaligned NEON load/store The alignment fixup incorrectly decodes faulting ARM VLDn/VSTn instructions (where the optional alignment hint is given but incorrect) as LDR/STR, leading to register corruption. Detect these and correctly treat them as unhandled, so that userspace gets the fault it expects. Reported-by: Simon Hosie Signed-off-by: Robin Murphy Cc: Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 0c1ab49..83792f4 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -41,6 +41,7 @@ * This code is not portable to processors with late data abort handling. */ #define CODING_BITS(i) (i & 0x0e000000) +#define COND_BITS(i) (i & 0xf0000000) #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ @@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) break; case 0x04000000: /* ldr or str immediate */ + if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */ + goto bad; offset.un = OFFSET_BITS(instr); handler = do_alignment_ldrstr; break; -- cgit v1.1 From 8b521cb2947d8811b4cf7fc6a7a6ebde35218243 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 16 Sep 2014 20:41:43 +0100 Subject: ARM: 8152/1: Convert pr_warning to pr_warn Use the more common pr_warn. 
Other miscellanea: o Coalesce formats o Realign arguments Signed-off-by: Joe Perches Signed-off-by: Russell King --- arch/arm/include/asm/syscall.h | 8 ++++---- arch/arm/kernel/atags_parse.c | 2 +- arch/arm/kernel/hw_breakpoint.c | 18 +++++++++--------- arch/arm/kernel/irq.c | 4 ++-- arch/arm/kernel/perf_event_cpu.c | 4 ++-- arch/arm/kernel/smp.c | 2 +- arch/arm/kernel/unwind.c | 24 ++++++++++++------------ arch/arm/mm/idmap.c | 2 +- arch/arm/mm/mmu.c | 4 ++-- 9 files changed, 34 insertions(+), 34 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 4651f69..e86c985 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -63,8 +63,8 @@ static inline void syscall_get_arguments(struct task_struct *task, if (i + n > SYSCALL_MAX_ARGS) { unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); + pr_warn("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); memset(args_bad, 0, n_bad * sizeof(args[0])); n = SYSCALL_MAX_ARGS - i; } @@ -88,8 +88,8 @@ static inline void syscall_set_arguments(struct task_struct *task, return; if (i + n > SYSCALL_MAX_ARGS) { - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); + pr_warn("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); n = SYSCALL_MAX_ARGS - i; } diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 7807ef5..528f8af 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -130,7 +130,7 @@ static int __init parse_tag_cmdline(const struct tag *tag) strlcat(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); #elif defined(CONFIG_CMDLINE_FORCE) - pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); + pr_warn("Ignoring tag cmdline (using the default kernel command line)\n"); #else strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 4d963fb..b5b452f 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -113,8 +113,8 @@ static u32 read_wb_reg(int n) GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to read from unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to read from unknown breakpoint register %d\n", + n); } return val; @@ -128,8 +128,8 @@ static void write_wb_reg(int n, u32 val) GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to write to unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to write to unknown breakpoint register %d\n", + n); } isb(); } @@ -292,7 +292,7 @@ int hw_breakpoint_slots(int type) case TYPE_DATA: return get_num_wrps(); default: - pr_warning("unknown slot type: %d\n", type); + pr_warn("unknown slot type: %d\n", type); return 0; } } @@ -365,7 +365,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + pr_warn("Can't find any breakpoint slot\n"); return -EBUSY; } @@ -417,7 +417,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + 
pr_warn("Can't find any breakpoint slot\n"); return; } @@ -894,8 +894,8 @@ static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) { int cpu = smp_processor_id(); - pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n", - instr, cpu); + pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n", + instr, cpu); /* Set the error flag for this CPU and skip the faulting instruction. */ cpumask_set_cpu(cpu, &debug_err_mask); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2c42576..40cbca6 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -205,8 +205,8 @@ void migrate_irqs(void) raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) - pr_warning("IRQ%u no longer affine to CPU%u\n", i, - smp_processor_id()); + pr_warn("IRQ%u no longer affine to CPU%u\n", + i, smp_processor_id()); } local_irq_restore(flags); diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index e6a6edb..101dbab 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -152,8 +152,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) * continue. Otherwise, continue without this interrupt. */ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); continue; } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index d19aea4..39c74a2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -646,7 +646,7 @@ void smp_send_stop(void) udelay(1); if (num_online_cpus() > 1) - pr_warning("SMP: failed to stop secondary CPUs\n"); + pr_warn("SMP: failed to stop secondary CPUs\n"); } /* diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index a61a1df..cbb85c5 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -157,7 +157,7 @@ static const struct unwind_idx *search_index(unsigned long addr, if (likely(start->addr_offset <= addr_prel31)) return start; else { - pr_warning("unwind: Unknown symbol address %08lx\n", addr); + pr_warn("unwind: Unknown symbol address %08lx\n", addr); return NULL; } } @@ -225,7 +225,7 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) unsigned long ret; if (ctrl->entries <= 0) { - pr_warning("unwind: Corrupt unwind table\n"); + pr_warn("unwind: Corrupt unwind table\n"); return 0; } @@ -333,8 +333,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) insn = (insn << 8) | unwind_get_byte(ctrl); mask = insn & 0x0fff; if (mask == 0) { - pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", - insn); + pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n", + insn); return -URC_FAILURE; } @@ -357,8 +357,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) unsigned long mask = unwind_get_byte(ctrl); if (mask == 0 || mask & 0xf0) { - pr_warning("unwind: Spare encoding %04lx\n", - (insn << 8) | mask); + pr_warn("unwind: Spare encoding %04lx\n", + (insn << 8) | mask); return -URC_FAILURE; } @@ -370,7 +370,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { - pr_warning("unwind: Unhandled instruction %02lx\n", insn); + pr_warn("unwind: Unhandled instruction %02lx\n", insn); return -URC_FAILURE; } @@ -403,7 +403,7 @@ int unwind_frame(struct stackframe *frame) idx = unwind_find_idx(frame->pc); if (!idx) { - pr_warning("unwind: 
Index not found %08lx\n", frame->pc); + pr_warn("unwind: Index not found %08lx\n", frame->pc); return -URC_FAILURE; } @@ -422,8 +422,8 @@ int unwind_frame(struct stackframe *frame) /* only personality routine 0 supported in the index */ ctrl.insn = &idx->insn; else { - pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", - idx->insn, idx); + pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n", + idx->insn, idx); return -URC_FAILURE; } @@ -435,8 +435,8 @@ int unwind_frame(struct stackframe *frame) ctrl.byte = 1; ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); } else { - pr_warning("unwind: Unsupported personality routine %08lx at %p\n", - *ctrl.insn, ctrl.insn); + pr_warn("unwind: Unsupported personality routine %08lx at %p\n", + *ctrl.insn, ctrl.insn); return -URC_FAILURE; } diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index c447ec7..e7a81ceb 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -27,7 +27,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) { pmd = pmd_alloc_one(&init_mm, addr); if (!pmd) { - pr_warning("Failed to allocate identity pmd.\n"); + pr_warn("Failed to allocate identity pmd.\n"); return; } /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 8348ed6..9f98cec 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -223,13 +223,13 @@ early_param("ecc", early_ecc); static int __init early_cachepolicy(char *p) { - pr_warning("cachepolicy kernel parameter not supported without cp15\n"); + pr_warn("cachepolicy kernel parameter not supported without cp15\n"); } early_param("cachepolicy", early_cachepolicy); static int __init noalign_setup(char *__unused) { - pr_warning("noalign kernel parameter not supported without cp15\n"); + pr_warn("noalign kernel parameter not supported without cp15\n"); } __setup("noalign", noalign_setup); -- cgit v1.1 From 195b58add463f697fb802ed55e26759094d40a54 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 28 Aug 2014 13:08:14 +0100 Subject: ARM: Avoid writing to control register on every exception If we are not changing the control register value, avoid writing to it. Writes to the control register can be very expensive, taking around a hundred cycles or so. 
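The change below implements this in the exception entry assembly; as a rough C-style sketch of the same read-compare-conditionally-write idea (illustration only, not kernel code; read_ctrl()/write_ctrl() are hypothetical stand-ins for the mrc/mcr CP15 accesses done in entry-*.S):

/*
 * Sketch: read the control register, compare against the desired value,
 * and only perform the expensive write when the value would change.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ctrl = 0x00c5187f;              /* pretend SCTLR contents */

static uint32_t read_ctrl(void)        { return ctrl; }
static void write_ctrl(uint32_t v)     { ctrl = v; printf("expensive write\n"); }

static void update_ctrl(uint32_t desired)
{
        uint32_t cur = read_ctrl();

        if (cur != desired)             /* skip the write if nothing changes */
                write_ctrl(desired);
}

int main(void)
{
        update_ctrl(0x00c5187f);        /* no change: no write issued */
        update_ctrl(0x00c5187d);        /* changed:   write issued    */
        return 0;
}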
Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 10 +++++++--- arch/arm/kernel/entry-common.S | 2 +- arch/arm/kernel/entry-header.S | 16 ++++++++++++---- 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 36276cd..3fe61de 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -321,6 +321,9 @@ ENDPROC(__pabt_svc) ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) + ATRAP( mrc p15, 0, r7, c1, c0, 0) + ATRAP( ldr r8, .LCcralign) + ldmia r0, {r3 - r5} add r0, sp, #S_PC @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" @@ -328,6 +331,8 @@ ENDPROC(__pabt_svc) str r3, [sp] @ save the "real" r0 copied @ from the exception stack + ATRAP( ldr r8, [r8, #0]) + @ @ We are now ready to fill in the remaining blanks on the stack: @ @@ -341,10 +346,9 @@ ENDPROC(__pabt_svc) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) - @ @ Enable the alignment trap while in kernel mode - @ - alignment_trap r0, .LCcralign + ATRAP( teq r8, r7) + ATRAP( mcrne p15, 0, r8, c1, c0, 0) @ @ Clear FP to mark the first stack frame diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e52fe5a..6bb09d4 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -366,7 +366,7 @@ ENTRY(vector_swi) str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif zero_fp - alignment_trap ip, __cr_alignment + alignment_trap r10, ip, __cr_alignment enable_irq ct_user_exit get_thread_info tsk diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 8db307d..7b72939 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -37,11 +37,19 @@ #endif .endm - .macro alignment_trap, rtemp, label #ifdef CONFIG_ALIGNMENT_TRAP - ldr \rtemp, \label - ldr \rtemp, [\rtemp] - mcr p15, 0, \rtemp, c1, c0 +#define ATRAP(x...) x +#else +#define ATRAP(x...) +#endif + + .macro alignment_trap, rtmp1, rtmp2, label +#ifdef CONFIG_ALIGNMENT_TRAP + mrc p15, 0, \rtmp2, c1, c0, 0 + ldr \rtmp1, \label + ldr \rtmp1, [\rtmp1] + teq \rtmp1, \rtmp2 + mcrne p15, 0, \rtmp1, c1, c0, 0 #endif .endm -- cgit v1.1 From 75c349062a666deab57bdca8b5bd0779c9fb0d58 Mon Sep 17 00:00:00 2001 From: Vincent Sanders Date: Thu, 18 Sep 2014 20:39:15 +0100 Subject: ARM: 8153/1: Enable gcov support on the ARM architecture Enable gcov support for ARM based on original patches by David Singleton and George G. Davis Riku - updated to patch to current mainline kernel. The patch has been submitted in 2010, 2012 - for symmetry, now in 2014 too. https://lwn.net/Articles/390419/ http://marc.info/?l=linux-arm-kernel&m=133823081813044 v2: remove arch/arm/kernel from gcov disabled files Cc: Andrey Ryabinin Cc: Naresh Kamboju Acked-by: Arnd Bergmann Signed-off-by: Riku Voipio Signed-off-by: Vincent Sanders Signed-off-by: Russell King --- arch/arm/boot/bootp/Makefile | 2 ++ arch/arm/boot/compressed/Makefile | 2 ++ 2 files changed, 4 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile index c394e30..5761f00 100644 --- a/arch/arm/boot/bootp/Makefile +++ b/arch/arm/boot/bootp/Makefile @@ -5,6 +5,8 @@ # architecture-specific flags and dependencies. 
# +GCOV_PROFILE := n + LDFLAGS_bootp :=-p --no-undefined -X \ --defsym initrd_phys=$(INITRD_PHYS) \ --defsym params_phys=$(PARAMS_PHYS) -T diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 76a50ec..3ea230a 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -37,6 +37,8 @@ ifeq ($(CONFIG_ARM_VIRT_EXT),y) OBJS += hyp-stub.o endif +GCOV_PROFILE := n + # # Architecture dependencies # -- cgit v1.1 From 02e0409a65560da66a747d2ac6023715b04659ea Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 22 Sep 2014 22:08:42 +0100 Subject: ARM: 8154/1: use _install_special_mapping for sigpage _install_special_mapping allows the VMA to be identifed in /proc/pid/maps without the use of arch_vma_name, providing a slight net reduction in object size: text data bss dec hex filename 2996 96 144 3236 ca4 arch/arm/kernel/process.o (before) 2956 104 144 3204 c84 arch/arm/kernel/process.o (after) Signed-off-by: Nathan Lynch Reviewed-by: Kees Cook Signed-off-by: Russell King --- arch/arm/kernel/process.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 81ef686..46fbbb3 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -472,19 +472,23 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - return is_gate_vma(vma) ? "[vectors]" : - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? - "[sigpage]" : NULL; + return is_gate_vma(vma) ? "[vectors]" : NULL; } static struct page *signal_page; extern struct page *get_signal_page(void); +static const struct vm_special_mapping sigpage_mapping = { + .name = "[sigpage]", + .pages = &signal_page, +}; + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; unsigned long addr; - int ret; + int ret = 0; if (!signal_page) signal_page = get_signal_page(); @@ -498,12 +502,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - ret = install_special_mapping(mm, addr, PAGE_SIZE, + vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - &signal_page); + &sigpage_mapping); + + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto up_fail; + } - if (ret == 0) - mm->context.sigpage = addr; + mm->context.sigpage = addr; up_fail: up_write(&mm->mmap_sem); -- cgit v1.1 From 389522b0c0530658eb9f9a53410ec2494616d785 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 22 Sep 2014 22:12:35 +0100 Subject: ARM: 8155/1: place sigpage at a random offset above stack The sigpage is currently placed alongside shared libraries etc in the address space. Similar to what x86_64 does for its VDSO, place the sigpage at a randomized offset above the stack so that learning the base address of the sigpage doesn't help expose where shared libraries are loaded in the address space (and vice versa). 
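Since the previous patch names the VMA "[sigpage]" in /proc/pid/maps, the randomized placement can be observed from userspace. A small illustration (not part of the patch; it only shows a "[sigpage]" line on an ARM kernel carrying these changes):

/*
 * Print the "[sigpage]" and "[stack]" mappings of the current process.
 * Running it several times should show the sigpage at varying offsets
 * above the stack.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;

        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[sigpage]") || strstr(line, "[stack]"))
                        fputs(line, stdout);

        fclose(f);
        return 0;
}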
Signed-off-by: Nathan Lynch Reviewed-by: Kees Cook Signed-off-by: Russell King --- arch/arm/kernel/process.c | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 46fbbb3..edd2ac3 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -475,6 +475,39 @@ const char *arch_vma_name(struct vm_area_struct *vma) return is_gate_vma(vma) ? "[vectors]" : NULL; } +/* If possible, provide a placement hint at a random offset from the + * stack for the signal page. + */ +static unsigned long sigpage_addr(const struct mm_struct *mm, + unsigned int npages) +{ + unsigned long offset; + unsigned long first; + unsigned long last; + unsigned long addr; + unsigned int slots; + + first = PAGE_ALIGN(mm->start_stack); + + last = TASK_SIZE - (npages << PAGE_SHIFT); + + /* No room after stack? */ + if (first > last) + return 0; + + /* Just enough room? */ + if (first == last) + return first; + + slots = ((last - first) >> PAGE_SHIFT) + 1; + + offset = get_random_int() % slots; + + addr = first + (offset << PAGE_SHIFT); + + return addr; +} + static struct page *signal_page; extern struct page *get_signal_page(void); @@ -488,6 +521,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr; + unsigned long hint; int ret = 0; if (!signal_page) @@ -496,7 +530,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -ENOMEM; down_write(&mm->mmap_sem); - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + hint = sigpage_addr(mm, 1); + addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; -- cgit v1.1 From aeea3592a13bf12861943e44fc48f1f270941f8d Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 24 Sep 2014 01:06:46 +0100 Subject: ARM: 8158/1: LLVMLinux: use static inline in ARM ftrace.h With compilers which follow the C99 standard (like modern versions of gcc and clang), "extern inline" does the wrong thing (emits code for an externally linkable version of the inline function). In this case using static inline and removing the NULL version of return_address in return_address.c does the right thing. 
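A minimal userspace illustration of the distinction (not kernel code; the function name is local to the example):

/*
 * Under C99 semantics, "extern inline" in a header asks the compiler to
 * emit an externally linkable definition in every translation unit that
 * includes it, which clashes with an out-of-line definition elsewhere.
 * "static inline" keeps the fallback local to each translation unit, so
 * no external symbol is emitted and no separate NULL version is needed.
 */
#include <stddef.h>
#include <stdio.h>

/* The safe form used by the patch: local to this translation unit. */
static inline void *return_address_fallback(unsigned int level)
{
        (void)level;
        return NULL;
}

int main(void)
{
        printf("fallback(2) = %p\n", return_address_fallback(2));
        return 0;
}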
Signed-off-by: Behan Webster Reviewed-by: Mark Charlebois Acked-by: Steven Rostedt Signed-off-by: Russell King --- arch/arm/include/asm/ftrace.h | 2 +- arch/arm/kernel/return_address.c | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 39eb16b..bfe2a2f 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -45,7 +45,7 @@ void *return_address(unsigned int); #else -extern inline void *return_address(unsigned int level) +static inline void *return_address(unsigned int level) { return NULL; } diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index fafedd8..f6aa84d 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -63,11 +63,6 @@ void *return_address(unsigned int level) #warning "TODO: return_address should use unwind tables" #endif -void *return_address(unsigned int level) -{ - return NULL; -} - #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ EXPORT_SYMBOL_GPL(return_address); -- cgit v1.1 From c3c963e1fb1c43641831595f53aa23f34dcaed3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 24 Sep 2014 09:01:19 +0100 Subject: ARM: 8161/1: footbridge: select machine dir based on ARCH_FOOTBRIDGE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Syntactically FOOTBRIDGE and ARCH_FOOTBRIDGE are identical (the former is defined in an if ARCH_FOOTBRIDGE block and the latter selects the former). Sematically FOOTBRIDGE means "we have a DC21285 (aka footbridge) device in the system" and ARCH_FOOTBRIDGE is the support for boards with a footbridge device, so ARCH_FOOTBRIDGE is the better symbol here. Signed-off-by: Uwe Kleine-König Signed-off-by: Russell King --- arch/arm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 0ce9d0f..12bfc1f 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -157,6 +157,7 @@ machine-$(CONFIG_ARCH_EBSA110) += ebsa110 machine-$(CONFIG_ARCH_EFM32) += efm32 machine-$(CONFIG_ARCH_EP93XX) += ep93xx machine-$(CONFIG_ARCH_EXYNOS) += exynos +machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge machine-$(CONFIG_ARCH_GEMINI) += gemini machine-$(CONFIG_ARCH_HIGHBANK) += highbank machine-$(CONFIG_ARCH_HISI) += hisi @@ -205,7 +206,6 @@ machine-$(CONFIG_ARCH_VEXPRESS) += vexpress machine-$(CONFIG_ARCH_VT8500) += vt8500 machine-$(CONFIG_ARCH_W90X900) += w90x900 machine-$(CONFIG_ARCH_ZYNQ) += zynq -machine-$(CONFIG_FOOTBRIDGE) += footbridge machine-$(CONFIG_PLAT_SPEAR) += spear # Platform directory name. This list is sorted alphanumerically -- cgit v1.1 From e16343c47e4276f5ebc77ca16feb5e50ca1918f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 24 Sep 2014 08:51:57 +0100 Subject: ARM: 8160/1: drop warning about return_address not using unwind tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The warning was introduced in 2009 (commit 4bf1fa5a34aa ([ARM] 5613/1: implement CALLER_ADDRESSx)). The only "problem" here is that CALLER_ADDRESSx for x > 1 returns NULL which doesn't do much harm. The drawback of implementing a fix (i.e. use unwind tables to implement CALLER_ADDRESSx) is that much of the unwinder code would need to be marked as not traceable. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Russell King --- arch/arm/kernel/return_address.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index f6aa84d..98ea4b7 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -59,10 +59,6 @@ void *return_address(unsigned int level) #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ -#if defined(CONFIG_ARM_UNWIND) -#warning "TODO: return_address should use unwind tables" -#endif - #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ EXPORT_SYMBOL_GPL(return_address); -- cgit v1.1 From ebc77251a4c50d7c310893915fb07581550c1ba5 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sun, 28 Sep 2014 05:36:46 +0100 Subject: ARM: 8177/1: cacheflush: Fix v7_exit_coherency_flush exynos build breakage on ARMv6 This fixes build breakage of platsmp.c if ARMv6 was chosen for compile time options (e.g. by building allmodconfig): $ make allmodconfig $ make CC arch/arm/mach-exynos/platsmp.o /tmp/ccdQM0Eg.s: Assembler messages: /tmp/ccdQM0Eg.s:432: Error: selected processor does not support ARM mode `isb ' /tmp/ccdQM0Eg.s:437: Error: selected processor does not support ARM mode `isb ' /tmp/ccdQM0Eg.s:438: Error: selected processor does not support ARM mode `dsb ' make[1]: *** [arch/arm/mach-exynos/platsmp.o] Error 1 The error was introduced in commit "ARM: EXYNOS: Move code from hotplug.c to platsmp.c". Previously code using v7_exit_coherency_flush() macro was built with '-march=armv7-a' flag but this flag dissapeared during the movement. Fix this by annotating the v7_exit_coherency_flush() asm code with armv7-a architecture. Signed-off-by: Krzysztof Kozlowski Reported-by: Mark Brown Acked-by: Nicolas Pitre Signed-off-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 79ecb4f..10e78d0 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -466,6 +466,7 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) */ #define v7_exit_coherency_flush(level) \ asm volatile( \ + ".arch armv7-a \n\t" \ "stmfd sp!, {fp, ip} \n\t" \ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ "bic r0, r0, #"__stringify(CR_C)" \n\t" \ -- cgit v1.1 From 9cc6d9e5daaa147a9a3e31557efcb331989e77be Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 29 Sep 2014 19:11:36 +0100 Subject: ARM: 8178/1: fix set_tls for !CONFIG_KUSER_HELPERS Joachim Eastwood reports that commit fbfb872f5f41 "ARM: 8148/1: flush TLS and thumbee register state during exec" causes a boot-time crash on a Cortex-M4 nommu system: Freeing unused kernel memory: 68K (281e5000 - 281f6000) Unhandled exception: IPSR = 00000005 LR = fffffff1 CPU: 0 PID: 1 Comm: swapper Not tainted 3.17.0-rc6-00313-gd2205fa30aa7 #191 task: 29834000 ti: 29832000 task.ti: 29832000 PC is at flush_thread+0x2e/0x40 LR is at flush_thread+0x21/0x40 pc : [<2800954a>] lr : [<2800953d>] psr: 4100000b sp : 29833d60 ip : 00000000 fp : 00000001 r10: 00003cf8 r9 : 29b1f000 r8 : 00000000 r7 : 29b0bc00 r6 : 29834000 r5 : 29832000 r4 : 29832000 r3 : ffff0ff0 r2 : 29832000 r1 : 00000000 r0 : 282121f0 xPSR: 4100000b CPU: 0 PID: 1 Comm: swapper Not tainted 3.17.0-rc6-00313-gd2205fa30aa7 #191 [<2800afa5>] (unwind_backtrace) from [<2800a327>] 
(show_stack+0xb/0xc) [<2800a327>] (show_stack) from [<2800a963>] (__invalid_entry+0x4b/0x4c) The problem is that set_tls is attempting to clear the TLS location in the kernel-user helper page, which isn't set up on V7M. Fix this by guarding the write to the kuser helper page with a CONFIG_KUSER_HELPERS ifdef. Fixes: fbfb872f5f41 ARM: 8148/1: flush TLS and thumbee register state during exec Reported-by: Joachim Eastwood Tested-by: Joachim Eastwood Cc: stable@vger.kernel.org Signed-off-by: Nathan Lynch Signed-off-by: Russell King --- arch/arm/include/asm/tls.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 36172ad..5f833f7 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -81,6 +81,7 @@ static inline void set_tls(unsigned long val) asm("mcr p15, 0, %0, c13, c0, 3" : : "r" (val)); } else { +#ifdef CONFIG_KUSER_HELPERS /* * User space must never try to access this * directly. Expect your app to break @@ -89,6 +90,7 @@ static inline void set_tls(unsigned long val) * entry-armv.S for details) */ *((unsigned int *)0xffff0ff0) = val; +#endif } } -- cgit v1.1 From ad684dce87fac52738649e62b4afa25081b52a28 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Tue, 30 Sep 2014 10:25:10 +0100 Subject: ARM: 8179/1: kprobes-test: Fix compile error "bad immediate value for offset" When compiling kprobes-test-arm.c the following error has been observed /tmp/ccoT403o.s:21439: Error: bad immediate value for offset (4168) This is caused by the compiler spilling it's literal pool too far away from the site which is trying to reference it with a PC relative load. This arises because the compiler is underestimating the size of the inline assembler code present, which apparently it approximates as 4 bytes per line or instruction. We fix this problem by moving the operations which generate more than 4 bytes out of the text section. Specifically, moving the .ascii directives to the .rodata section. Signed-off-by: Jon Medhurst Signed-off-by: Russell King --- arch/arm/kernel/kprobes-test.c | 16 +++++++++------- arch/arm/kernel/kprobes-test.h | 5 ++++- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c index 08d7312..b206d77 100644 --- a/arch/arm/kernel/kprobes-test.c +++ b/arch/arm/kernel/kprobes-test.c @@ -110,10 +110,13 @@ * * @ TESTCASE_START * bl __kprobes_test_case_start - * @ start of inline data... + * .pushsection .rodata + * "10: * .ascii "mov r0, r7" @ text title for test case * .byte 0 - * .align 2, 0 + * .popsection + * @ start of inline data... 
+ * .word 10b @ pointer to title in .rodata section * * @ TEST_ARG_REG * .byte ARG_TYPE_REG @@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void) __asm__ __volatile__ ( "stmdb sp!, {r4-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "bic r0, lr, #1 @ r0 = inline title string \n\t" + "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" "bl kprobes_test_case_start \n\t" "bx r0 \n\t" @@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc) return pc + 4; } -static uintptr_t __used kprobes_test_case_start(const char *title, void *stack) +static uintptr_t __used kprobes_test_case_start(const char **title, void *stack) { struct test_arg *args; struct test_arg_end *end_arg; unsigned long test_code; - args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4); - - current_title = title; + current_title = *title++; + args = (struct test_arg *)title; current_args = args; current_stack = stack; diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index eecc90a..4430990 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h @@ -111,11 +111,14 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ "bl __kprobes_test_case_start \n\t" \ + ".pushsection .rodata \n\t" \ + "10: \n\t" \ /* don't use .asciz here as 'title' may be */ \ /* multiple strings to be concatenated. */ \ ".ascii "#title" \n\t" \ ".byte 0 \n\t" \ - ".align 2, 0 \n\t" + ".popsection \n\t" \ + ".word 10b \n\t" #define TEST_ARG_REG(reg, val) \ ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ -- cgit v1.1 From f3354ab67476dc800463df32e33423158003d80b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 26 Sep 2014 09:01:58 +0100 Subject: ARM: 8169/1: l2c: parse cache properties from ePAPR definitions When both 'cache-size' and 'cache-sets' are specified for a L2 cache controller node, parse those properties and set up the set size based on which type of L2 cache controller we are using. Update the L2 cache controller Device Tree binding with the optional 'cache-size', 'cache-sets', 'cache-block-size' and 'cache-line-size' properties. These come from the ePAPR specification. Using the cache size, number of sets and cache line size we can calculate desired associativity of the L2 cache. This is done by the calculation: set size = cache size / sets ways = set size / line size way size = cache size / ways = sets * line size associativity = cache size / way size Example output from the PB1176 DT that look like this: L2: l2-cache { compatible = "arm,l220-cache"; (...) arm,override-auxreg; cache-size = <131072>; // 128kB cache-sets = <512>; cache-line-size = <32>; }; Ends up like this: L2C OF: override cache size: 131072 bytes (128KB) L2C OF: override line size: 32 bytes L2C OF: override way size: 16384 bytes (16KB) L2C OF: override associativity: 8 L2C: DT/platform modifies aux control register: 0x02020fff -> 0x02030fff L2C-220 cache controller enabled, 8 ways, 128 kB L2C-220: CACHE_ID 0x41000486, AUX_CTRL 0x06030fff Which is consistent with the value earlier hardcoded for the PB1176 platform. This patch is an extended version based on the initial patch by Florian Fainelli. 
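A worked check of the arithmetic above, using the PB1176 values from the example node (plain C, illustration only):

/*
 * cache-size = 131072, cache-sets = 512, cache-line-size = 32
 * way size      = sets * line size
 * associativity = cache size / way size
 */
#include <stdio.h>

int main(void)
{
        unsigned int cache_size = 131072;       /* 128 kB */
        unsigned int sets = 512;
        unsigned int line_size = 32;

        unsigned int way_size = sets * line_size;               /* 16384 = 16 kB */
        unsigned int associativity = cache_size / way_size;     /* 8 ways */

        printf("way size: %u bytes (%u kB)\n", way_size, way_size >> 10);
        printf("associativity: %u\n", associativity);
        return 0;
}

This reproduces the boot output quoted in the commit message: a 16 kB way size and 8-way associativity.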
Reviewed-by: Arnd Bergmann Signed-off-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 121 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 5f2c988..55f9d6e 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -945,6 +946,98 @@ static int l2_wt_override; * pass it though the device tree */ static u32 cache_id_part_number_from_dt; +/** + * l2x0_cache_size_of_parse() - read cache size parameters from DT + * @np: the device tree node for the l2 cache + * @aux_val: pointer to machine-supplied auxilary register value, to + * be augmented by the call (bits to be set to 1) + * @aux_mask: pointer to machine-supplied auxilary register mask, to + * be augmented by the call (bits to be set to 0) + * @associativity: variable to return the calculated associativity in + * @max_way_size: the maximum size in bytes for the cache ways + */ +static void __init l2x0_cache_size_of_parse(const struct device_node *np, + u32 *aux_val, u32 *aux_mask, + u32 *associativity, + u32 max_way_size) +{ + u32 mask = 0, val = 0; + u32 cache_size = 0, sets = 0; + u32 way_size_bits = 1; + u32 way_size = 0; + u32 block_size = 0; + u32 line_size = 0; + + of_property_read_u32(np, "cache-size", &cache_size); + of_property_read_u32(np, "cache-sets", &sets); + of_property_read_u32(np, "cache-block-size", &block_size); + of_property_read_u32(np, "cache-line-size", &line_size); + + if (!cache_size || !sets) + return; + + /* All these l2 caches have the same line = block size actually */ + if (!line_size) { + if (block_size) { + /* If linesize if not given, it is equal to blocksize */ + line_size = block_size; + } else { + /* Fall back to known size */ + pr_warn("L2C OF: no cache block/line size given: " + "falling back to default size %d bytes\n", + CACHE_LINE_SIZE); + line_size = CACHE_LINE_SIZE; + } + } + + if (line_size != CACHE_LINE_SIZE) + pr_warn("L2C OF: DT supplied line size %d bytes does " + "not match hardware line size of %d bytes\n", + line_size, + CACHE_LINE_SIZE); + + /* + * Since: + * set size = cache size / sets + * ways = cache size / (sets * line size) + * way size = cache size / (cache size / (sets * line size)) + * way size = sets * line size + * associativity = ways = cache size / way size + */ + way_size = sets * line_size; + *associativity = cache_size / way_size; + + if (way_size > max_way_size) { + pr_err("L2C OF: set size %dKB is too large\n", way_size); + return; + } + + pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", + cache_size, cache_size >> 10); + pr_info("L2C OF: override line size: %d bytes\n", line_size); + pr_info("L2C OF: override way size: %d bytes (%dKB)\n", + way_size, way_size >> 10); + pr_info("L2C OF: override associativity: %d\n", *associativity); + + /* + * Calculates the bits 17:19 to set for way size: + * 512KB -> 6, 256KB -> 5, ... 
16KB -> 1 + */ + way_size_bits = ilog2(way_size >> 10) - 3; + if (way_size_bits < 1 || way_size_bits > 6) { + pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", + way_size); + return; + } + + mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; + val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT); + + *aux_val &= ~mask; + *aux_val |= val; + *aux_mask &= ~mask; +} + static void __init l2x0_of_parse(const struct device_node *np, u32 *aux_val, u32 *aux_mask) { @@ -952,6 +1045,7 @@ static void __init l2x0_of_parse(const struct device_node *np, u32 tag = 0; u32 dirty = 0; u32 val = 0, mask = 0; + u32 assoc; of_property_read_u32(np, "arm,tag-latency", &tag); if (tag) { @@ -974,6 +1068,15 @@ static void __init l2x0_of_parse(const struct device_node *np, val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; } + l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + if (assoc > 8) { + pr_err("l2x0 of: cache setting yield too high associativity\n"); + pr_err("l2x0 of: %d calculated, max 8\n", assoc); + } else { + mask |= L2X0_AUX_CTRL_ASSOC_MASK; + val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT); + } + *aux_val &= ~mask; *aux_val |= val; *aux_mask &= ~mask; @@ -1021,6 +1124,7 @@ static void __init l2c310_of_parse(const struct device_node *np, u32 data[3] = { 0, 0, 0 }; u32 tag[3] = { 0, 0, 0 }; u32 filter[2] = { 0, 0 }; + u32 assoc; of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); if (tag[0] && tag[1] && tag[2]) @@ -1047,6 +1151,23 @@ static void __init l2c310_of_parse(const struct device_node *np, writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, l2x0_base + L310_ADDR_FILTER_START); } + + l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); + switch (assoc) { + case 16: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + case 8: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + default: + pr_err("PL310 OF: cache setting yield illegal associativity\n"); + pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc); + break; + } } static const struct l2c_init_data of_l2c310_data __initconst = { -- cgit v1.1 From 562c85cadb065e33ec9f651b8d41cdfd3054a5d0 Mon Sep 17 00:00:00 2001 From: Yalin Wang Date: Fri, 26 Sep 2014 03:30:59 +0100 Subject: ARM: 8168/1: extend __init_end to a page align address This patch changes the __init_end address to a page align address, so that free_initmem() can free the whole .init section, because if the end address is not page aligned, it will round down to a page align address, then the tail unligned page will not be freed. Signed-off-by: wang Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 6f57cb9..8e95aa4 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -219,8 +219,8 @@ SECTIONS __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else - __init_end = .; . 
= ALIGN(THREAD_SIZE); + __init_end = .; __data_loc = .; #endif -- cgit v1.1 From 421520ba98290a73b35b7644e877a48f18e06004 Mon Sep 17 00:00:00 2001 From: Yalin Wang Date: Fri, 26 Sep 2014 03:07:09 +0100 Subject: ARM: 8167/1: extend the reserved memory for initrd to be page aligned This patch extends the start and end address of initrd to be page aligned, so that we can free all memory including the un-page aligned head or tail page of initrd, if the start or end address of initrd are not page aligned, the page can't be freed by free_initrd_mem() function. Signed-off-by: Yalin Wang Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 659c75d..9221645 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -636,6 +636,11 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { + if (start == initrd_start) + start = round_down(start, PAGE_SIZE); + if (end == initrd_end) + end = round_up(end, PAGE_SIZE); + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); free_reserved_area((void *)start, (void *)end, -1, "initrd"); } -- cgit v1.1
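A quick illustration of the effect of the added rounding, assuming a 4 KiB page size and made-up initrd addresses (illustration only, not kernel code):

/*
 * An initrd that starts or ends mid-page now has its boundaries extended
 * outward before freeing, so the partial head/tail pages are reclaimed
 * as well, instead of being skipped by the inward page alignment.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define round_down(x, a)        ((x) & ~((a) - 1))
#define round_up(x, a)          (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x80001200;       /* example, not page aligned */
        unsigned long end   = 0x80043800;       /* example, not page aligned */

        printf("freed before the patch: [%#lx, %#lx)\n",
               round_up(start, PAGE_SIZE), round_down(end, PAGE_SIZE));
        printf("freed after the patch:  [%#lx, %#lx)\n",
               round_down(start, PAGE_SIZE), round_up(end, PAGE_SIZE));
        return 0;
}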