From bbeb9209515989ff47802d4e5d5702178c8e42c4 Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Tue, 25 Aug 2015 13:52:09 +0100
Subject: ARM: 8422/1: enable imprecise aborts during early kernel startup

This patch adds imprecise abort enable/disable macros and uses them to
enable imprecise aborts early when starting the kernel. This helps in
tracking down the real cause of such imprecise aborts, as they are
handled as soon as they occur. Until now, those aborts would only be
enabled when entering userspace and, as a consequence, would crash the
first userspace process if any abort had been raised during kernel
startup.

Signed-off-by: Fabrice Gasnier
Signed-off-by: Lucas Stach
Signed-off-by: Russell King
---
 arch/arm/include/asm/irqflags.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 4390814..e6b70d9 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
 
 #define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable()  __asm__("cpsie a	@ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a	@ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
+#endif
 #else
 
 /*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
 	: "memory", "cc");					\
 	})
 
+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
 #endif
 
 /*
--
cgit v1.1
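The new local_abt_enable() expands to "cpsie a", which clears the A (asynchronous abort mask) bit in the CPSR so imprecise aborts are taken as soon as they occur. A minimal sketch of the kind of call this enables during early startup follows; the function name and placement are assumptions for illustration, since the actual call site lies outside arch/arm/include and is not part of this hunk:

    /*
     * Sketch only: assumes the local_abt_enable() macro added above.
     * The function below is hypothetical, not the call site used by
     * the patch itself.
     */
    #include <linux/init.h>
    #include <linux/irqflags.h>

    static void __init hypothetical_early_abort_setup(void)
    {
            /*
             * Unmask imprecise (asynchronous) aborts as early as
             * possible, so a stray bus error faults immediately
             * instead of being deferred until the first switch to
             * userspace, where it would kill the first user process.
             */
            local_abt_enable();
    }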
From 4caa9dda388f34f957a9eb52b9f5ef1a8c975c7b Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Wed, 26 Aug 2015 07:49:11 +0100
Subject: ARM: 8424/1: add const qualifier to the argument of smp_set_ops()

This function just copies '*ops' to 'smp_ops', so the given structure
'*ops' is not modified at all.

Signed-off-by: Masahiro Yamada
Acked-by: Stephen Boyd
Signed-off-by: Russell King
---
 arch/arm/include/asm/smp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index ef35665..2f8bd44 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -122,6 +122,6 @@ struct of_cpu_method {
 /*
  * set platform specific SMP operations
  */
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);
 
 #endif /* ifndef __ASM_ARM_SMP_H */
--
cgit v1.1

From f460b6abdeeafd30c3ee737c843be17b1ceb38e5 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Wed, 26 Aug 2015 07:49:12 +0100
Subject: ARM: 8423/1: add const qualifier to smp_operations member in structures

The core framework does not modify smp_operations structures. To
clarify this, this commit adds a 'const' qualifier to the 'ops' member
of struct of_cpu_method and the 'smp' member of struct machine_desc.
This change allows each SoC's code to add the 'const' qualifier to its
smp_operations structure.

Signed-off-by: Masahiro Yamada
Signed-off-by: Russell King
---
 arch/arm/include/asm/mach/arch.h | 2 +-
 arch/arm/include/asm/smp.h       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index cb3a407..5c1ad11 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -47,7 +47,7 @@ struct machine_desc {
 	unsigned		l2c_aux_val;	/* L2 cache aux value	*/
 	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/
 	void			(*l2c_write_sec)(unsigned long, unsigned);
-	struct smp_operations	*smp;		/* SMP operations	*/
+	const struct smp_operations	*smp;	/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
 	void			(*dt_fixup)(void);
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f8bd44..3d6dc8b 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -112,7 +112,7 @@ struct smp_operations {
 
 struct of_cpu_method {
 	const char *method;
-	struct smp_operations *ops;
+	const struct smp_operations *ops;
 };
 
 #define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
--
cgit v1.1
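With the 'smp' and 'ops' members now const, platform code can mark its smp_operations table const (and __initconst) and still register it through CPU_METHOD_OF_DECLARE(). A minimal sketch under assumed names; the SoC, method string and callback below are made up for illustration and presume an SMP build:

    #include <linux/init.h>
    #include <linux/smp.h>

    /* Illustrative secondary-CPU bring-up hook for a made-up SoC. */
    static int hypothetical_boot_secondary(unsigned int cpu,
                                           struct task_struct *idle)
    {
            /* poke the power controller or mailbox for 'cpu' here */
            return 0;
    }

    /* The const qualifier is now accepted by struct of_cpu_method. */
    static const struct smp_operations hypothetical_smp_ops __initconst = {
            .smp_boot_secondary     = hypothetical_boot_secondary,
    };

    CPU_METHOD_OF_DECLARE(hypothetical_smp, "vendor,hypothetical-smp",
                          &hypothetical_smp_ops);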
From 803e3dbcb4cf80c898faccf01875f6ff6e5e76fd Mon Sep 17 00:00:00 2001
From: Sergey Dyasly
Date: Wed, 9 Sep 2015 16:27:18 +0100
Subject: ARM: 8430/1: use default ioremap alignment for SMP or LPAE

16MB alignment for ioremap mappings was added by commit a069c896d0d6
("[ARM] 3705/1: add supersection support to ioremap()") in order to
support supersection mappings. But __arm_ioremap_pfn_caller() uses
section and supersection mappings only in the !SMP && !LPAE case. There
is no need for such a big alignment if either SMP or LPAE is enabled.

After this change, ioremap will use the default maximum alignment of
128 pages.

Link: https://lkml.kernel.org/g/1419328813-2211-1-git-send-email-d.safonov@partner.samsung.com
Cc: Guan Xuetao
Cc: Nicolas Pitre
Cc: James Bottomley
Cc: Will Deacon
Cc: Arnd Bergmann
Cc: Catalin Marinas
Cc: Andrew Morton
Cc: Dmitry Safonov
Signed-off-by: Sergey Dyasly
Signed-off-by: Russell King
---
 arch/arm/include/asm/memory.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 98d58bb..c79b57b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,10 +76,12 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
  */
 #define IOREMAP_MAX_ORDER	24
+#endif
 
 #else /* CONFIG_MMU */
 
--
cgit v1.1

From 6ff0966052c46efb53980b8a1add2e7b49c9f560 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Sun, 13 Sep 2015 03:25:26 +0100
Subject: ARM: 8432/1: move VMALLOC_END from 0xff000000 to 0xff800000

There is a 12MB unused region in our memory map between the vmalloc and
fixmap areas. This became unused with commit e9da6e9905e6, confirmed
with commit 64d3b6a3f480. We also have an 8MB guard area before the
vmalloc area.

With the default 240MB vmalloc area size and the current VMALLOC_END
definition, the end of low memory ends up at 0xef800000, which is
unfortunate for 768MB machines where 8MB of RAM is lost to highmem.

Let's move VMALLOC_END to 0xff800000 so the guard area won't chop off
the top of the 768MB low memory area, while keeping the default vmalloc
area size unchanged and still preserving a gap between the vmalloc and
fixmap areas.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/pgtable.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f403541..348caab 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -43,7 +43,7 @@
  */
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END		0xff000000UL
+#define VMALLOC_END		0xff800000UL
 
 #define LIBRARY_TEXT_START	0x0c000000
 
--
cgit v1.1
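To see where the 768MB figure comes from, a worked calculation may help. It assumes the common 3G/1G split with PAGE_OFFSET at 0xc0000000, the default 240MB vmalloc size and the 8MB VMALLOC_OFFSET guard; this is a simplified sketch, not the exact logic in arch/arm/mm/mmu.c:

    Before: VMALLOC_END   = 0xff000000
            vmalloc start = 0xff000000 - 240MB      = 0xf0000000
            top of lowmem = 0xf0000000 - 8MB guard  = 0xef800000
            lowmem size   = 0xef800000 - 0xc0000000 = 760MB
            (a 768MB machine loses 8MB to highmem)

    After:  VMALLOC_END   = 0xff800000
            vmalloc start = 0xff800000 - 240MB      = 0xf0800000
            top of lowmem = 0xf0800000 - 8MB guard  = 0xf0000000
            lowmem size   = 0xf0000000 - 0xc0000000 = 768MB
            (the full 768MB stays in low memory)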
From db695c0509d6ec9046ee5e4c520a19fa17d9fce2 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 21 Sep 2015 19:34:28 +0100
Subject: ARM: remove user cmpxchg syscall

Mark Brand reports that a NEEDS_SYSCALL_FOR_CMPXCHG enabled kernel
would open a security hole in the ghost syscall used to implement
cmpxchg, as it fails to validate the user pointer.

However, in order for this option to be enabled, you'd need to be
building a pre-ARMv6 kernel with SMP support. There is only one known
system which fits that: an early ARM SMP FPGA implementation based on
the ARM926T. In any case, the Kconfig does not allow SMP to be enabled
for pre-ARMv6 systems.

Moreover, even if NEEDS_SYSCALL_FOR_CMPXCHG were to be enabled, the
kernel would not build as __ARM_NR_cmpxchg64 is not defined.

The simple answer is to remove the buggy code.

Reported-by: Mark Brand
Signed-off-by: Russell King
---
 arch/arm/include/asm/unistd.h | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c4..8f63daf 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -21,13 +21,6 @@
  */
 #define __NR_syscalls  (388)
 
-/*
- * *NOTE*: This is a ghost syscall private to the kernel.  Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence.  Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg	(__ARM_NR_BASE+0x00fff0)
-
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
--
cgit v1.1

From e8973a889e69cb86cac08bec2863c878c0d27af9 Mon Sep 17 00:00:00 2001
From: Sarbojit Ganguly
Date: Fri, 9 Oct 2015 12:10:02 +0100
Subject: ARM: 8443/1: Adding support for atomic half word exchange

Support for half-word atomic exchange was not there, and qspinlock on
ARM requires it, so modify __xchg() to add support for it as well.
ARMv6 and lower do not support ldrex{b,h}, so guard code has been added
to prevent build breakage.

Signed-off-by: Sarbojit Ganguly
Signed-off-by: Russell King
---
 arch/arm/include/asm/cmpxchg.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 916a274..97882f9 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6	/* MIN ARCH >= V6K */
 	case 1:
 		asm volatile("@	__xchg1\n"
 		"1:	ldrexb	%0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		: "r" (x), "r" (ptr)
 		: "memory", "cc");
 		break;
+	case 2:
+		asm volatile("@	__xchg2\n"
+		"1:	ldrexh	%0, [%3]\n"
+		"	strexh	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+		: "=&r" (ret), "=&r" (tmp)
+		: "r" (x), "r" (ptr)
+		: "memory", "cc");
+		break;
+#endif
 	case 4:
 		asm volatile("@	__xchg4\n"
 		"1:	ldrex	%0, [%3]\n"
--
cgit v1.1
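With the 2-byte case in place, a 16-bit xchg() on ARMv6K and later compiles to the ldrexh/strexh loop above instead of falling through to __bad_xchg(). A minimal sketch of the kind of use qspinlock needs; the lock layout and helper below are illustrative assumptions, not the actual qspinlock code:

    #include <linux/types.h>
    #include <linux/atomic.h>

    /* Illustrative lock layout with a 16-bit tail field. */
    struct hypothetical_lock {
            u16     locked_pending;
            u16     tail;
    };

    /* Atomically publish a new tail and return the previous one. */
    static inline u16 hypothetical_xchg_tail(struct hypothetical_lock *lock,
                                             u16 new_tail)
    {
            /* Relies on the new 2-byte case in __xchg(). */
            return xchg(&lock->tail, new_tail);
    }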