From 7fb2bbf4d9e7e62057184f1e061566459eef6c79 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 9 Jul 2009 15:15:12 +0100 Subject: MMC: MMCI: allow GPIOs to be passed Add and initialize the gpio_wp and gpio_cd members. We need to ensure that all users are covered, because GPIO 0 may be valid. Signed-off-by: Russell King Acked-by: Linus Walleij --- arch/arm/include/asm/mach/mmc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h index 4da332b..b490ecc 100644 --- a/arch/arm/include/asm/mach/mmc.h +++ b/arch/arm/include/asm/mach/mmc.h @@ -10,6 +10,8 @@ struct mmc_platform_data { unsigned int ocr_mask; /* available voltages */ u32 (*translate_vdd)(struct device *, unsigned int); unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; }; #endif -- cgit v1.1 From f7a55fa6ecef8be6d15bd79a803e44a3187ce9d6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 11 Jul 2009 16:51:40 +0100 Subject: [ARM] remove L_PTE_BUFFERABLE and L_PTE_CACHEABLE These old symbols are meaningless now that we have memory type support implemented. The entire memory type field needs to be modified rather than just a few bits twiddled. Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index c433c6c..d575666 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -164,8 +164,6 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_PRESENT (1 << 0) #define L_PTE_FILE (1 << 1) /* only when !PRESENT */ #define L_PTE_YOUNG (1 << 1) -#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */ -#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */ #define L_PTE_DIRTY (1 << 6) #define L_PTE_WRITE (1 << 7) #define L_PTE_USER (1 << 8) -- cgit v1.1 From 6a00cded91532f3d58e07729ba56269339281d8e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 11 Jul 2009 16:57:20 +0100 Subject: [ARM] pgtable: rearrange file PTE bit allocation For future compatibility, we need to ensure that swap and file Linux PTEs conform with the hardware PTEs "fault" encoding. Swap PTEs already fit in with this, but file PTEs do not. Shift them by one bit to ensure that they conform, using bit 2 to distinguish between swap and file PTEs. Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d575666..9655bce3 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -162,8 +162,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * entries are stored 1024 bytes below. 
*/ #define L_PTE_PRESENT (1 << 0) -#define L_PTE_FILE (1 << 1) /* only when !PRESENT */ #define L_PTE_YOUNG (1 << 1) +#define L_PTE_FILE (1 << 2) /* only when !PRESENT */ #define L_PTE_DIRTY (1 << 6) #define L_PTE_WRITE (1 << 7) #define L_PTE_USER (1 << 8) @@ -379,13 +379,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <--- type --> 0 0 + * <--------------- offset --------------------> <- type --> 0 0 0 * - * This gives us up to 127 swap files and 32GB per swap file. Note that + * This gives us up to 63 swap files and 32GB per swap file. Note that * the offset field is always non-zero. */ -#define __SWP_TYPE_SHIFT 2 -#define __SWP_TYPE_BITS 7 +#define __SWP_TYPE_SHIFT 3 +#define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) @@ -409,13 +409,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <------------------------ offset -------------------------> 1 0 + * <----------------------- offset ------------------------> 1 0 0 */ #define pte_file(pte) (pte_val(pte) & L_PTE_FILE) -#define pte_to_pgoff(x) (pte_val(x) >> 2) -#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 3) +#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE) -#define PTE_FILE_MAX_BITS 30 +#define PTE_FILE_MAX_BITS 29 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ /* FIXME: this is not correct */ -- cgit v1.1 From 4bf1fa5a34aa2dd0d2cc58f0fc213a2e22d007a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 21 Jul 2009 09:56:27 +0100 Subject: [ARM] 5613/1: implement CALLER_ADDRESSx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From: Uwe Kleine-König As __builtin_return_address(n) doesn't work for ARM with n > 0, the kernel needs its own implementation. This fixes many warnings saying: warning: unsupported argument to '__builtin_return_address' The new methods and walk_stackframe must not be instrumented because CALLER_ADDRESSx is used in the various tracers and tracing the tracer is a bad idea. What's currently missing is an implementation using unwind tables. This is not fatal though, it's just that the tracers don't get enough information to be really useful. Note that if both ARM_UNWIND and FRAME_POINTER are enabled, walk_stackframe uses unwind information. So in this case the same implementation is used as when FRAME_POINTER is disabled. Cc: Catalin Marinas Signed-off-by: Uwe Kleine-König Signed-off-by: Russell King --- arch/arm/include/asm/ftrace.h | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 39c8bc1..d74265c 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -11,4 +11,38 @@ extern void mcount(void); #endif +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) +/* + * return_address uses walk_stackframe to do it's work. If both + * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind + * information. 
For this to work in the function tracer many functions would + * have to be marked with __notrace. So for now just depend on + * !CONFIG_ARM_UNWIND. + */ + +void *return_address(unsigned int); + +#else + +extern inline void *return_address(unsigned int level) +{ + return NULL; +} + +#endif + +#define HAVE_ARCH_CALLER_ADDR + +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#define CALLER_ADDR1 ((unsigned long)return_address(1)) +#define CALLER_ADDR2 ((unsigned long)return_address(2)) +#define CALLER_ADDR3 ((unsigned long)return_address(3)) +#define CALLER_ADDR4 ((unsigned long)return_address(4)) +#define CALLER_ADDR5 ((unsigned long)return_address(5)) +#define CALLER_ADDR6 ((unsigned long)return_address(6)) + +#endif /* ifndef __ASSEMBLY__ */ + #endif /* _ASM_ARM_FTRACE */ -- cgit v1.1 From 0becb088501886f37ade38762c8eaaf4263572cc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:53 +0100 Subject: Thumb-2: Add macros for the unified assembler syntax This patch adds various C and assembler macros that help with using the unified assembler syntax for compiling files to either ARM or Thumb-2 modes. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/unified.h | 126 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 arch/arm/include/asm/unified.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h new file mode 100644 index 0000000..073e85b --- /dev/null +++ b/arch/arm/include/asm/unified.h @@ -0,0 +1,126 @@ +/* + * include/asm-arm/unified.h - Unified Assembler Syntax helper macros + * + * Copyright (C) 2008 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_UNIFIED_H +#define __ASM_UNIFIED_H + +#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED) + .syntax unified +#endif + +#ifdef CONFIG_THUMB2_KERNEL + +#if __GNUC__ < 4 +#error Thumb-2 kernel requires gcc >= 4 +#endif + +/* The CPSR bit describing the instruction set (Thumb) */ +#define PSR_ISETSTATE PSR_T_BIT + +#define ARM(x...) +#define THUMB(x...) x +#define W(instr) instr.w +#define BSYM(sym) sym + 1 + +#else /* !CONFIG_THUMB2_KERNEL */ + +/* The CPSR bit describing the instruction set (ARM) */ +#define PSR_ISETSTATE 0 + +#define ARM(x...) x +#define THUMB(x...) 
+#define W(instr) instr +#define BSYM(sym) sym + +#endif /* CONFIG_THUMB2_KERNEL */ + +#ifndef CONFIG_ARM_ASM_UNIFIED + +/* + * If the unified assembly syntax isn't used (in ARM mode), these + * macros expand to an empty string + */ +#ifdef __ASSEMBLY__ + .macro it, cond + .endm + .macro itt, cond + .endm + .macro ite, cond + .endm + .macro ittt, cond + .endm + .macro itte, cond + .endm + .macro itet, cond + .endm + .macro itee, cond + .endm + .macro itttt, cond + .endm + .macro ittte, cond + .endm + .macro ittet, cond + .endm + .macro ittee, cond + .endm + .macro itett, cond + .endm + .macro itete, cond + .endm + .macro iteet, cond + .endm + .macro iteee, cond + .endm +#else /* !__ASSEMBLY__ */ +__asm__( +" .macro it, cond\n" +" .endm\n" +" .macro itt, cond\n" +" .endm\n" +" .macro ite, cond\n" +" .endm\n" +" .macro ittt, cond\n" +" .endm\n" +" .macro itte, cond\n" +" .endm\n" +" .macro itet, cond\n" +" .endm\n" +" .macro itee, cond\n" +" .endm\n" +" .macro itttt, cond\n" +" .endm\n" +" .macro ittte, cond\n" +" .endm\n" +" .macro ittet, cond\n" +" .endm\n" +" .macro ittee, cond\n" +" .endm\n" +" .macro itett, cond\n" +" .endm\n" +" .macro itete, cond\n" +" .endm\n" +" .macro iteet, cond\n" +" .endm\n" +" .macro iteee, cond\n" +" .endm\n"); +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_ASM_UNIFIED */ + +#endif /* !__ASM_UNIFIED_H */ -- cgit v1.1 From b86040a59feb255a8193173caa4d5199464433d5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:54 +0100 Subject: Thumb-2: Implementation of the unified start-up and exceptions code This patch implements the ARM/Thumb-2 unified kernel start-up and exception handling code. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/assembler.h | 11 +++++++++++ arch/arm/include/asm/futex.h | 1 + 2 files changed, 12 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 15f8a09..1acd1da 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -127,3 +127,14 @@ #endif #endif .endm + +#ifdef CONFIG_THUMB2_KERNEL + .macro setmode, mode, reg + mov \reg, #\mode + msr cpsr_c, \reg + .endm +#else + .macro setmode, mode, reg + msr cpsr_c, #\mode + .endm +#endif diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 9ee743b..bfcc159 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -99,6 +99,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrt %0, [%3]\n" " teq %0, %1\n" + " it eq @ explicit IT needed for the 2b label\n" "2: streqt %2, [%3]\n" "3:\n" " .section __ex_table,\"a\"\n" -- cgit v1.1 From 8b592783a2e8b7721a99730bd549aab5208f36af Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:57 +0100 Subject: Thumb-2: Implement the unified arch/arm/lib functions This patch adds the ARM/Thumb-2 unified support for the arch/arm/lib/* files. 
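As an illustrative aside (not part of these patches): the empty it* macro stubs and the ARM()/THUMB() helpers introduced by the unified.h patch above are what let shared code, such as the futex and uaccess sequences in the surrounding patches, carry the IT instructions Thumb-2 requires without breaking ARM builds. A minimal, hypothetical sketch of the pattern:

/*
 * Illustrative sketch only -- not code from these patches.  Assumes the
 * macro stubs from <asm/unified.h> are in effect.  On a Thumb-2 kernel
 * "it eq" assembles to a real IT instruction guarding the conditional
 * move; on a plain ARM kernel the empty "it" macro stub absorbs it.
 */
static inline int zero_if_equal(int a, int b)
{
	int ret = a;

	asm(	"teq	%0, %1\n"
	"	it	eq\n"		/* required before the conditional instruction in Thumb-2 */
	"	moveq	%0, #0\n"
		: "+r" (ret)
		: "r" (b)
		: "cc");
	return ret;
}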
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/assembler.h | 73 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/uaccess.h | 7 ++-- 2 files changed, 78 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 1acd1da..2b60c7d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -138,3 +138,76 @@ msr cpsr_c, #\mode .endm #endif + +/* + * STRT/LDRT access macros with ARM and Thumb-2 variants + */ +#ifdef CONFIG_THUMB2_KERNEL + + .macro usraccoff, instr, reg, ptr, inc, off, cond, abort +9999: + .if \inc == 1 + \instr\cond\()bt \reg, [\ptr, #\off] + .elseif \inc == 4 + \instr\cond\()t \reg, [\ptr, #\off] + .else + .error "Unsupported inc macro argument" + .endif + + .section __ex_table,"a" + .align 3 + .long 9999b, \abort + .previous + .endm + + .macro usracc, instr, reg, ptr, inc, cond, rept, abort + @ explicit IT instruction needed because of the label + @ introduced by the USER macro + .ifnc \cond,al + .if \rept == 1 + itt \cond + .elseif \rept == 2 + ittt \cond + .else + .error "Unsupported rept macro argument" + .endif + .endif + + @ Slightly optimised to avoid incrementing the pointer twice + usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort + .if \rept == 2 + usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort + .endif + + add\cond \ptr, #\rept * \inc + .endm + +#else /* !CONFIG_THUMB2_KERNEL */ + + .macro usracc, instr, reg, ptr, inc, cond, rept, abort + .rept \rept +9999: + .if \inc == 1 + \instr\cond\()bt \reg, [\ptr], #\inc + .elseif \inc == 4 + \instr\cond\()t \reg, [\ptr], #\inc + .else + .error "Unsupported inc macro argument" + .endif + + .section __ex_table,"a" + .align 3 + .long 9999b, \abort + .previous + .endr + .endm + +#endif /* CONFIG_THUMB2_KERNEL */ + + .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f + usracc str, \reg, \ptr, \inc, \cond, \rept, \abort + .endm + + .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f + usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort + .endm diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 0da9bc9..1d6bd40 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -17,6 +17,7 @@ #include #include #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -365,8 +366,10 @@ do { \ #define __put_user_asm_dword(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strt " __reg_oper1 ", [%1], #4\n" \ - "2: strt " __reg_oper0 ", [%1]\n" \ + ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \ + ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \ + THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ + THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ -- cgit v1.1 From adca6dc23bc620ea95392659625200a252b97be3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:59 +0100 Subject: Thumb-2: Add support for loadable modules Modules compiled to Thumb-2 have two additional relocations needing to be resolved at load time, R_ARM_THM_CALL and R_ARM_THM_JUMP24, for BL and B.W instructions. The maximum Thumb-2 addressing range is +/-2^24 (+/-16MB) therefore the MODULES_VADDR macro in asm/memory.h is set to (MODULES_END - 8MB) for the Thumb-2 compiled kernel. 
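To make the quoted range concrete (an illustrative aside, not part of the patch): a Thumb-2 BL/B.W immediate encodes a signed byte offset of +/-2^24, so the module loader must reject any R_ARM_THM_CALL or R_ARM_THM_JUMP24 relocation whose target lies more than roughly 16MB from the branch; placing MODULES_VADDR only 8MB below PAGE_OFFSET keeps that check well within range. A hypothetical range check (the helper name is made up; the real handling lives in the module loader, not in these headers):

/*
 * Illustrative sketch only.  A Thumb-2 BL/B.W covers a signed offset of
 * +/-2^24 bytes, measured from the PC, which in Thumb state reads as the
 * address of the branch instruction plus 4.
 */
static int thm_branch_in_range(unsigned long loc, unsigned long target)
{
	long offset = (long)target - (long)(loc + 4);

	return offset >= -(1L << 24) && offset < (1L << 24);
}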
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/elf.h | 3 +++ arch/arm/include/asm/memory.h | 6 ++++++ 2 files changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index c207504..c3b911e 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -55,6 +55,9 @@ typedef struct user_fp elf_fpregset_t; #define R_ARM_MOVW_ABS_NC 43 #define R_ARM_MOVT_ABS 44 +#define R_ARM_THM_CALL 10 +#define R_ARM_THM_JUMP24 30 + /* * These are used to set parameters in the core dumps. */ diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 85763db..376be1a 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -44,7 +44,13 @@ * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. */ +#ifndef CONFIG_THUMB2_KERNEL #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) +#else +/* smaller range for Thumb-2 symbols relocation (2^24)*/ +#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) +#endif + #if TASK_SIZE > MODULES_VADDR #error Top of user space clashes with start of module space #endif -- cgit v1.1 From 9a45f026bb3ba720765d46f62f5c464d3317a8ab Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:34:56 +0100 Subject: nommu: Remove the context.id from asm-offsets.c when !MMU There is no MMU context switching on MMU-less systems. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/mmu_context.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 263fed0..bcdb929 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -62,8 +62,10 @@ static inline void check_context(struct mm_struct *mm) static inline void check_context(struct mm_struct *mm) { +#ifdef CONFIG_MMU if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) __check_kvm_seq(mm); +#endif } #define init_new_context(tsk,mm) 0 -- cgit v1.1 From 68b7f7153fa58df710924fbb79722717d2d16094 Mon Sep 17 00:00:00 2001 From: Paul Brook Date: Fri, 24 Jul 2009 12:34:58 +0100 Subject: nommu: ptrace support The patch below adds ARM ptrace functions to get the process load address. This is required for useful userspace debugging on mmuless systems. These values are obtained by reading magic offsets with PTRACE_PEEKUSR, as on other nommu targets. I picked arbitrary large values for the offsets. Signed-off-by: Paul Brook --- arch/arm/include/asm/ptrace.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 67b833c..bbecccd 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -82,6 +82,14 @@ #define PSR_ENDSTATE 0 #endif +/* + * These are 'magic' values for PTRACE_PEEKUSR that return info about where a + * process is located in memory. + */ +#define PT_TEXT_ADDR 0x10000 +#define PT_DATA_ADDR 0x10004 +#define PT_TEXT_END_ADDR 0x10008 + #ifndef __ASSEMBLY__ /* -- cgit v1.1 From 2732f4b6f11689fa08e2db5689fc652d608936b5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:35:01 +0100 Subject: nommu: Remove the memory_start/end variables from ARM page-nommu.h These variables do not seem to be used anywhere in the kernel. 
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/page-nommu.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h index 3574c0d..d1b162a 100644 --- a/arch/arm/include/asm/page-nommu.h +++ b/arch/arm/include/asm/page-nommu.h @@ -43,7 +43,4 @@ typedef unsigned long pgprot_t; #define __pmd(x) (x) #define __pgprot(x) (x) -extern unsigned long memory_start; -extern unsigned long memory_end; - #endif -- cgit v1.1 From 181f817eaaca4c1f8a9c265d339d2b96de8b245d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 13 Aug 2009 20:38:16 +0200 Subject: [ARM] support tracing when using newer compilers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since gcc 4.4 the name and calling convention for function profiling on ARM changed. With this patch both types are supported. See http://sourceware.org/ml/libc-ports/2008-04/msg00009.html for some details. Lightly-Tested-by: Anand Gadiyar Tested-by: Kevin Hilman Signed-off-by: Uwe Kleine-König --- arch/arm/include/asm/ftrace.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 39c8bc1..0d4c478 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ extern void mcount(void); +extern void __gnu_mcount_nc(void); #endif #endif -- cgit v1.1 From 0d928b0b616d1c5c5fe76019a87cba171ca91633 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 13 Aug 2009 20:38:17 +0200 Subject: Complete irq tracing support for ARM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this patch enabling and disabling irqs in assembler code and by the hardware wasn't tracked completly. 
I had to transpose two instructions in arch/arm/lib/bitops.h because restore_irqs doesn't preserve the flags with CONFIG_TRACE_IRQFLAGS=y Signed-off-by: Uwe Kleine-König Cc: Russell King Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Uwe Kleine-König --- arch/arm/include/asm/assembler.h | 49 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 15f8a09..44912cd 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -74,23 +74,56 @@ * Enable and disable interrupts */ #if __LINUX_ARM_ARCH__ >= 6 - .macro disable_irq + .macro disable_irq_notrace cpsid i .endm - .macro enable_irq + .macro enable_irq_notrace cpsie i .endm #else - .macro disable_irq + .macro disable_irq_notrace msr cpsr_c, #PSR_I_BIT | SVC_MODE .endm - .macro enable_irq + .macro enable_irq_notrace msr cpsr_c, #SVC_MODE .endm #endif + .macro asm_trace_hardirqs_off +#if defined(CONFIG_TRACE_IRQFLAGS) + stmdb sp!, {r0-r3, ip, lr} + bl trace_hardirqs_off + ldmia sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro asm_trace_hardirqs_on_cond, cond +#if defined(CONFIG_TRACE_IRQFLAGS) + /* + * actually the registers should be pushed and pop'd conditionally, but + * after bl the flags are certainly clobbered + */ + stmdb sp!, {r0-r3, ip, lr} + bl\cond trace_hardirqs_on + ldmia sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro asm_trace_hardirqs_on + asm_trace_hardirqs_on_cond al + .endm + + .macro disable_irq + disable_irq_notrace + asm_trace_hardirqs_off + .endm + + .macro enable_irq + asm_trace_hardirqs_on + enable_irq_notrace + .endm /* * Save the current IRQ state and disable IRQs. Note that this macro * assumes FIQs are enabled, and that the processor is in SVC mode. @@ -104,10 +137,16 @@ * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags. */ - .macro restore_irqs, oldcpsr + .macro restore_irqs_notrace, oldcpsr msr cpsr_c, \oldcpsr .endm + .macro restore_irqs, oldcpsr + tst \oldcpsr, #PSR_I_BIT + asm_trace_hardirqs_on_cond eq + restore_irqs_notrace \oldcpsr + .endm + #define USER(x...) \ 9999: x; \ .section __ex_table,"a"; \ -- cgit v1.1 From 369842658a36bcea28ecb643ba4bdb53919330dd Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Sat, 15 Aug 2009 12:58:11 +0100 Subject: ARM: 5677/1: ARM support for TIF_RESTORE_SIGMASK/pselect6/ppoll/epoll_pwait This patch adds support for TIF_RESTORE_SIGMASK to ARM's signal handling, which allows to hook up the pselect6, ppoll, and epoll_pwait syscalls on ARM. Tested here with eabi userspace and a test program with a deliberate race between a child's exit and the parent's sigprocmask/select sequence. Using sys_pselect6() instead of sigprocmask/select reliably prevents the race. 
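For readers who have not met this race before, a minimal userspace sketch (hypothetical, not the test program mentioned above) shows why the combined syscall matters:

#include <signal.h>
#include <sys/select.h>

/*
 * SIGCHLD is assumed to be blocked on entry; the goal is to sleep until
 * either fd becomes readable or a child exits.
 */
static void racy_wait(int fd, const sigset_t *mask_without_sigchld,
		      const sigset_t *mask_with_sigchld)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);

	sigprocmask(SIG_SETMASK, mask_without_sigchld, NULL);
	/* A SIGCHLD delivered in this window runs the handler now, but
	 * select() below still blocks -- possibly forever. */
	select(fd + 1, &rfds, NULL, NULL, NULL);
	sigprocmask(SIG_SETMASK, mask_with_sigchld, NULL);
}

static void safe_wait(int fd, const sigset_t *mask_without_sigchld)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);

	/* The temporary mask is installed and the wait begins atomically:
	 * a child exit that is already pending interrupts pselect()
	 * immediately instead of being lost. */
	pselect(fd + 1, &rfds, NULL, NULL, NULL, mask_without_sigchld);
}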
The other arch's support for TIF_RESTORE_SIGMASK has evolved over time: In 2.6.16: - add TIF_RESTORE_SIGMASK which parallels TIF_SIGPENDING - test both when checking for pending signal [changed later] - reimplement sys_sigsuspend() to use current->saved_sigmask, TIF_RESTORE_SIGMASK [changed later], and -ERESTARTNOHAND; ditto for sys_rt_sigsuspend(), but drop private code and use common code via __ARCH_WANT_SYS_RT_SIGSUSPEND; - there are now no "extra" calls to do_signal() so its oldset parameter is always ¤t->blocked so need not be passed, also its return value is changed to void - change handle_signal() to return 0/-errno - change do_signal() to honor TIF_RESTORE_SIGMASK: + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK + if no signal was delivered and TIF_RESTORE_SIGMASK is set then clear it and restore the sigmask - hook up sys_pselect6() and sys_ppoll() In 2.6.19: - hook up sys_epoll_pwait() In 2.6.26: - allow archs to override how TIF_RESTORE_SIGMASK is implemented; default set_restore_sigmask() sets both TIF_RESTORE_SIGMASK and TIF_SIGPENDING; archs need now just test TIF_SIGPENDING again when checking for pending signal work; some archs now implement TIF_RESTORE_SIGMASK as a secondary/non-atomic thread flag bit - call set_restore_sigmask() in sys_sigsuspend() instead of setting TIF_RESTORE_SIGMASK In 2.6.29-rc: - kill sys_pselect7() which no arch wanted So for 2.6.31-rc6/ARM this patch does the following: - Add TIF_RESTORE_SIGMASK. Use the generic set_restore_sigmask() which sets both TIF_SIGPENDING and TIF_RESTORE_SIGMASK, so TIF_RESTORE_SIGMASK need not claim one of the scarce low thread flags, and existing TIF_SIGPENDING and _TIF_WORK_MASK tests need not be extended for TIF_RESTORE_SIGMASK. - sys_sigsuspend() is reimplemented to use current->saved_sigmask and set_restore_sigmask(), making it identical to most other archs - The private code for sys_rt_sigsuspend() is removed, instead generic code supplies it via __ARCH_WANT_SYS_RT_SIGSUSPEND. - sys_sigsuspend() and sys_rt_sigsuspend() no longer need a pt_regs parameter, so their assembly code wrappers are removed. - handle_signal() is changed to return 0 on success or -errno. - The oldset parameter to do_signal() is now redundant and removed, and the return value is now also redundant and changed to void. - do_signal() is changed to honor TIF_RESTORE_SIGMASK: + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK + if no signal was delivered and TIF_RESTORE_SIGMASK is set then clear it and restore the sigmask - Hook up sys_pselect6, sys_ppoll, and sys_epoll_pwait. 
Signed-off-by: Mikael Pettersson Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 2 ++ arch/arm/include/asm/unistd.h | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 73394e5..e20d805 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -140,6 +140,7 @@ extern void vfp_sync_state(struct thread_info *thread); #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 #define TIF_FREEZE 19 +#define TIF_RESTORE_SIGMASK 20 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -147,6 +148,7 @@ extern void vfp_sync_state(struct thread_info *thread); #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_FREEZE (1 << TIF_FREEZE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 0e97b8c..9122c9e 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -360,8 +360,8 @@ #define __NR_readlinkat (__NR_SYSCALL_BASE+332) #define __NR_fchmodat (__NR_SYSCALL_BASE+333) #define __NR_faccessat (__NR_SYSCALL_BASE+334) - /* 335 for pselect6 */ - /* 336 for ppoll */ +#define __NR_pselect6 (__NR_SYSCALL_BASE+335) +#define __NR_ppoll (__NR_SYSCALL_BASE+336) #define __NR_unshare (__NR_SYSCALL_BASE+337) #define __NR_set_robust_list (__NR_SYSCALL_BASE+338) #define __NR_get_robust_list (__NR_SYSCALL_BASE+339) @@ -372,7 +372,7 @@ #define __NR_vmsplice (__NR_SYSCALL_BASE+343) #define __NR_move_pages (__NR_SYSCALL_BASE+344) #define __NR_getcpu (__NR_SYSCALL_BASE+345) - /* 346 for epoll_pwait */ +#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346) #define __NR_kexec_load (__NR_SYSCALL_BASE+347) #define __NR_utimensat (__NR_SYSCALL_BASE+348) #define __NR_signalfd (__NR_SYSCALL_BASE+349) @@ -432,6 +432,7 @@ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) #define __ARCH_WANT_SYS_TIME -- cgit v1.1 From 65cec8e3db606608fd1f8dfc4a1c7c37bfba9173 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Aug 2009 20:02:06 +0100 Subject: ARM: implement highpte Add the ARM implementation of highpte, which allows PTE tables to be placed in highmem. Unfortunately, we do not offer highpte support when support for L2 cache is enabled. Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 16 ++++++++++++---- arch/arm/include/asm/pgtable.h | 17 +++++++++++++---- 2 files changed, 25 insertions(+), 8 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 3dcd64b..b12cc98 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -36,6 +36,8 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); #define pgd_alloc(mm) get_pgd_slow(mm) #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) + /* * Allocate one PTE table. 
* @@ -57,7 +59,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) { pte_t *pte; - pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + pte = (pte_t *)__get_free_page(PGALLOC_GFP); if (pte) { clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); pte += PTRS_PER_PTE; @@ -71,10 +73,16 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) { struct page *pte; - pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); +#ifdef CONFIG_HIGHPTE + pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0); +#else + pte = alloc_pages(PGALLOC_GFP, 0); +#endif if (pte) { - void *page = page_address(pte); - clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); + if (!PageHighMem(pte)) { + void *page = page_address(pte); + clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); + } pgtable_page_ctor(pte); } diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9655bce3..201ccaa 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -262,10 +262,19 @@ extern struct page *empty_zero_page; #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_offset_map(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_offset_map_nested(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) + +#define pte_offset_map(dir,addr) (__pte_map(dir, KM_PTE0) + __pte_index(addr)) +#define pte_offset_map_nested(dir,addr) (__pte_map(dir, KM_PTE1) + __pte_index(addr)) +#define pte_unmap(pte) __pte_unmap(pte, KM_PTE0) +#define pte_unmap_nested(pte) __pte_unmap(pte, KM_PTE1) + +#ifndef CONFIG_HIGHPTE +#define __pte_map(dir,km) pmd_page_vaddr(*(dir)) +#define __pte_unmap(pte,km) do { } while (0) +#else +#define __pte_map(dir,km) ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE) +#define __pte_unmap(pte,km) kunmap_atomic((pte - PTRS_PER_PTE), km) +#endif #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) -- cgit v1.1 From b7cfda9fc3d7aa60cffab5367f2a72a4a70060cd Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 7 Sep 2009 15:06:42 +0100 Subject: ARM: Fix pfn_valid() for sparse memory On OMAP platforms, some people want to declare to segment up the memory between the kernel and a separate application such that there is a hole in the middle of the memory as far as Linux is concerned. However, they want to be able to mmap() the hole. This currently causes problems, because update_mmu_cache() thinks that there are valid struct pages for the "hole". Fix this by making pfn_valid() slightly more expensive, by checking whether the PFN is contained within the meminfo array. 
Signed-off-by: Russell King Tested-by: Khasim Syed Mohammed --- arch/arm/include/asm/memory.h | 17 ----------------- arch/arm/include/asm/page.h | 4 ++++ 2 files changed, 4 insertions(+), 17 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 85763db..b3b54c6 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -212,7 +212,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * - * pfn_valid(pfn) indicates whether a PFN number is valid * * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid @@ -221,10 +220,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET -#ifndef CONFIG_SPARSEMEM -#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) -#endif - #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) @@ -241,18 +236,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define arch_pfn_to_nid(pfn) PFN_TO_NID(pfn) #define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT) -#define pfn_valid(pfn) \ - ({ \ - unsigned int nid = PFN_TO_NID(pfn); \ - int valid = nid < MAX_NUMNODES; \ - if (valid) { \ - pg_data_t *node = NODE_DATA(nid); \ - valid = (pfn - node->node_start_pfn) < \ - node->node_spanned_pages; \ - } \ - valid; \ - }) - #define virt_to_page(kaddr) \ (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr)) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 9c746af..3a32af4 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -194,6 +194,10 @@ typedef unsigned long pgprot_t; typedef struct page *pgtable_t; +#ifndef CONFIG_SPARSEMEM +extern int pfn_valid(unsigned long); +#endif + #include #endif /* !__ASSEMBLY__ */ -- cgit v1.1
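The hunks above only replace the pfn_valid() macro with an extern declaration; the new out-of-line implementation is added under arch/arm/mm and is not shown here. As a rough sketch of the check the commit message describes (illustrative only -- a simple linear scan, with the meminfo/membank field names of that era treated as assumptions):

/*
 * Illustrative sketch, not the code added by this patch.  A pfn is valid
 * only if it falls inside one of the memory banks registered in the
 * meminfo array (struct meminfo / struct membank from <asm/setup.h>).
 */
int pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int i;

	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];
		unsigned long start = bank->start >> PAGE_SHIFT;
		unsigned long end = (bank->start + bank->size) >> PAGE_SHIFT;

		if (pfn >= start && pfn < end)
			return 1;
	}
	return 0;
}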