From b849a60e0903b1c5430c3859864554662e127a8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 16 Jan 2012 10:34:31 +0100 Subject: ARM: make cr_alignment read-only #ifndef CONFIG_CPU_CP15 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes cr_alignment a constant 0 to break code that tries to modify the value, as such code is likely built on wrong assumptions when CONFIG_CPU_CP15 isn't defined. For code that only reads the value, 0 is a reasonable value to report. Signed-off-by: Uwe Kleine-König Message-Id: 1358413196-5609-2-git-send-email-u.kleine-koenig@pengutronix.de (v8) --- arch/arm/include/asm/cp15.h | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 5ef4d80..1f3262e 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -42,6 +42,8 @@ #define vectors_high() (0) #endif +#ifdef CONFIG_CPU_CP15 + extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ extern unsigned long cr_alignment; /* defined in entry-armv.S */ @@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val) isb(); } -#endif +#else /* ifdef CONFIG_CPU_CP15 */ + +/* + * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the + * minds of the developers). Yielding 0 for machines without a cp15 (and making + * it read-only) is fine for most cases and saves quite some #ifdeffery. + */ +#define cr_no_alignment UL(0) +#define cr_alignment UL(0) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#endif /* ifndef __ASSEMBLY__ */ #endif -- cgit v1.1 From 473296e072ae77e96586dcb39a1dd5d10db22611 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 30 Jan 2013 12:07:14 +0100 Subject: ARM: sync comments about available data abort models with the code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While at it, bring both into the same increasing order.
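Returning to the cr_alignment change in the first patch above, a minimal sketch (not from the patch; check_alignment_enabled() is a made-up consumer) of why a constant, read-only cr_alignment is enough for readers — with CONFIG_CPU_CP15=n the test folds to a compile-time constant and the dead branch disappears:

    #include <linux/types.h>
    #include <asm/cp15.h>

    static bool check_alignment_enabled(void)
    {
            /* CR_A is the alignment-fault enable bit; cr_alignment is a constant 0 when !CPU_CP15 */
            return (cr_alignment & CR_A) != 0;
    }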
Signed-off-by: Uwe Kleine-König Message-Id: 1359544151-26744-1-git-send-email-u.kleine-koenig@pengutronix.de --- arch/arm/include/asm/glue-df.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 8cacbcd..b6e9f2c 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -18,12 +18,12 @@ * ================ * * We have the following to choose from: - * arm6 - ARM6 style * arm7 - ARM7 style * v4_early - ARMv4 without Thumb early abort handler * v4t_late - ARMv4 with Thumb late abort handler * v4t_early - ARMv4 with Thumb early abort handler - * v5tej_early - ARMv5 with Thumb and Java early abort handler + * v5t_early - ARMv5 with Thumb early abort handler + * v5tj_early - ARMv5 with Thumb and Java early abort handler * xscale - ARMv5 with Thumb with Xscale extensions * v6_early - ARMv6 generic early abort handler * v7_early - ARMv7 generic early abort handler @@ -39,19 +39,19 @@ # endif #endif -#ifdef CONFIG_CPU_ABRT_LV4T +#ifdef CONFIG_CPU_ABRT_EV4 # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v4t_late_abort +# define CPU_DABORT_HANDLER v4_early_abort # endif #endif -#ifdef CONFIG_CPU_ABRT_EV4 +#ifdef CONFIG_CPU_ABRT_LV4T # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v4_early_abort +# define CPU_DABORT_HANDLER v4t_late_abort # endif #endif @@ -63,19 +63,19 @@ # endif #endif -#ifdef CONFIG_CPU_ABRT_EV5TJ +#ifdef CONFIG_CPU_ABRT_EV5T # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v5tj_early_abort +# define CPU_DABORT_HANDLER v5t_early_abort # endif #endif -#ifdef CONFIG_CPU_ABRT_EV5T +#ifdef CONFIG_CPU_ABRT_EV5TJ # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 # else -# define CPU_DABORT_HANDLER v5t_early_abort +# define CPU_DABORT_HANDLER v5tj_early_abort # endif #endif -- cgit v1.1 From 6ebd4d038dbb626a43d87db3007e71f92f49d7b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 31 Jan 2013 11:08:03 +0100 Subject: ARM: stub out read_cpuid and read_cpuid_ext for CPU_CP15=n MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Traditionally, for !CPU_CP15, read_cpuid and read_cpuid_ext returned the processor id independent of the parameter passed in. This is wrong, of course, but in practice it does no harm because these helpers are only called on machines that have a cp15. Instead, return 0 unconditionally, which lets unused code paths be optimized away (and so shrinks the code), and warn about unexpected usage.
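To make the stub's behaviour concrete, a short hedged sketch (report_cache_type() is invented for illustration; read_cpuid_cachetype() is the real helper from this header): with CONFIG_CPU_CP15=n the call triggers the one-time WARN and evaluates to 0, so callers degrade predictably instead of misreporting the processor id:

    #include <linux/printk.h>
    #include <asm/cputype.h>

    static void report_cache_type(void)
    {
            /* On !CPU_CP15 kernels this warns once and prints 0. */
            pr_info("cache type register: 0x%08x\n", read_cpuid_cachetype());
    }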
Signed-off-by: Uwe Kleine-König Message-Id: 1359646587-1788-2-git-send-email-u.kleine-koenig@pengutronix.de --- arch/arm/include/asm/cputype.h | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index a59dcb5..574269e 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -50,6 +50,7 @@ extern unsigned int processor_id; : "cc"); \ __val; \ }) + #define read_cpuid_ext(ext_reg) \ ({ \ unsigned int __val; \ @@ -59,12 +60,25 @@ extern unsigned int processor_id; : "cc"); \ __val; \ }) -#else -#define read_cpuid(reg) (processor_id) -#define read_cpuid_ext(reg) 0 -#endif + +#else /* ifdef CONFIG_CPU_CP15 */ /* + * read_cpuid and read_cpuid_ext should only ever be called on machines that + * have cp15 so warn on other usages. + */ +#define read_cpuid(reg) \ + ({ \ + WARN_ON_ONCE(1); \ + 0; \ + }) + +#define read_cpuid_ext(reg) read_cpuid(reg) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#ifdef CONFIG_CPU_CP15 +/* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. Use this function to read the CPU ID * rather than directly reading processor_id or read_cpuid() directly. @@ -74,6 +88,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) return read_cpuid(CPUID_ID); } +#else /* ifdef CONFIG_CPU_CP15 */ + +static inline unsigned int __attribute_const__ read_cpuid_id(void) +{ + return processor_id; +} + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) { return read_cpuid(CPUID_CACHETYPE); -- cgit v1.1 From e1b5bb6d1236d4ad2084c53aa83dde7cdf6f8eea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 21 Jan 2013 17:16:07 -0500 Subject: consolidate cond_syscall and SYSCALL_ALIAS declarations take them to asm/linkage.h, with default in linux/linkage.h Signed-off-by: Al Viro --- arch/arm/include/asm/unistd.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index e4ddfb3..141baa3 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -44,14 +44,6 @@ #define __ARCH_WANT_SYS_CLONE /* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") - -/* * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 -- cgit v1.1 From db730d8d623a0826f7fb6b74e890d3eb97a1b7a3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Oct 2012 11:17:02 +0100 Subject: ARM: KVM: convert GP registers from u32 to unsigned long On 32bit ARM, unsigned long is guaranteed to be a 32bit quantity. On 64bit ARM, it is a 64bit quantity. In order to be able to share code between the two architectures, convert the registers to be unsigned long, so the core code can be oblivious of the change. 
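As an illustration of the sharing this enables, a hedged sketch (emulate_reg_to_pc() is a made-up example; vcpu_reg() and vcpu_pc() are the accessors converted below): the same core code now builds for both widths, since unsigned long is 32 bits on arm and 64 bits on arm64:

    #include <asm/kvm_emulate.h>

    static void emulate_reg_to_pc(struct kvm_vcpu *vcpu, u8 rn)
    {
            /* Width-agnostic: unsigned long matches the native register size. */
            *vcpu_pc(vcpu) = *vcpu_reg(vcpu, rn);
    }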
Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index fd61199..510488a 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -23,8 +23,8 @@ #include #include -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); -u32 *vcpu_spsr(struct kvm_vcpu *vcpu); +unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); +unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); @@ -37,14 +37,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) return 1; } -static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) { - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; + return &vcpu->arch.regs.usr_regs.ARM_pc; } -static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) { - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; + return &vcpu->arch.regs.usr_regs.ARM_cpsr; } static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) -- cgit v1.1 From 7393b599177d301d4c9ca2c7f69a6849aba793c7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 17 Sep 2012 19:27:09 +0100 Subject: ARM: KVM: abstract fault register accesses Instead of directly accessing the fault registers, use proper accessors so the core code can be shared. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 21 +++++++++++++++++++++ arch/arm/include/asm/kvm_host.h | 14 ++++++++------ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 510488a..3c01988 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -22,6 +22,7 @@ #include #include #include +#include unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); @@ -69,4 +70,24 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) return reg == 15; } +static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.hsr; +} + +static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.hxfar; +} + +static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) +{ + return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; +} + +static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.hyp_pc; +} + #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index d1736a5..eb836e6 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -80,6 +80,13 @@ struct kvm_mmu_memory_cache { void *objects[KVM_NR_MEM_OBJS]; }; +struct kvm_vcpu_fault_info { + u32 hsr; /* Hyp Syndrome Register */ + u32 hxfar; /* Hyp Data/Inst. 
Fault Address Register */ + u32 hpfar; /* Hyp IPA Fault Address Register */ + u32 hyp_pc; /* PC when exception was taken from Hyp mode */ +}; + struct kvm_vcpu_arch { struct kvm_regs regs; @@ -93,9 +100,7 @@ struct kvm_vcpu_arch { u32 midr; /* Exception Information */ - u32 hsr; /* Hyp Syndrome Register */ - u32 hxfar; /* Hyp Data/Inst Fault Address Register */ - u32 hpfar; /* Hyp IPA Fault Address Register */ + struct kvm_vcpu_fault_info fault; /* Floating point registers (VFP and Advanced SIMD/NEON) */ struct vfp_hard_struct vfp_guest; @@ -122,9 +127,6 @@ struct kvm_vcpu_arch { /* Interrupt related fields */ u32 irq_lines; /* IRQ and FIQ levels */ - /* Hyp exception information */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; -- cgit v1.1 From 4a1df28ac0ce6d76d336210d8c4216087c17de3a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:06:23 +0100 Subject: ARM: KVM: abstract HSR_ISV away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3c01988..ae9119e 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -90,4 +90,9 @@ static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) return vcpu->arch.fault.hyp_pc; } +static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 023cc964063509ece283e0bed227114197ac14e0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:12:26 +0100 Subject: ARM: KVM: abstract HSR_WNR away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index ae9119e..a0d76df 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -95,4 +95,9 @@ static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; } +static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 7c511b881f79a2a390e7c2e9a39b9a881255e614 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:23:02 +0100 Subject: ARM: KVM: abstract HSR_SSE away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index a0d76df..cd9cb87 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -100,4 +100,9 @@ static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; } +static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From d0adf747c9caa8b01d0c1f987e306b7c6aaa5a04 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:28:57 +0100 Subject: ARM: KVM: abstract HSR_SRT_{MASK,SHIFT} away 
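Taken together, the HSR accessors introduced across this run of patches (ISV, WNR, SSE, and the SRT field here) compose into a compact decoder. A hedged sketch, with decode_dabt_example() invented for illustration and the kvm_vcpu_dabt_* helpers being the ones added:

    #include <linux/errno.h>
    #include <asm/kvm_emulate.h>

    static int decode_dabt_example(struct kvm_vcpu *vcpu, bool *is_write,
                                   bool *sign_extend, int *rd)
    {
            /* Without valid ISS info the instruction must be decoded by hand. */
            if (!kvm_vcpu_dabt_isvalid(vcpu))
                    return -EINVAL;

            *is_write = kvm_vcpu_dabt_iswrite(vcpu);        /* HSR_WNR */
            *sign_extend = kvm_vcpu_dabt_issext(vcpu);      /* HSR_SSE */
            *rd = kvm_vcpu_dabt_get_rd(vcpu);               /* HSR_SRT field */
            return 0;
    }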
Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index cd9cb87..d548078 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -105,4 +105,9 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; } +static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) +{ + return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 78abfcde49e0e454cabf8e56cd4c1591752e2706 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:36:16 +0100 Subject: ARM: KVM: abstract (and fix) external abort detection away Bit 8 is cache maintenance, bit 9 is external abort. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_arm.h | 3 +++ arch/arm/include/asm/kvm_emulate.h | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 7c3d813..990764e 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -211,4 +211,7 @@ #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) +#define HSR_DABT_CM (1U << 8) +#define HSR_DABT_EA (1U << 9) + #endif /* __ARM_KVM_ARM_H__ */ diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index d548078..2d1c585 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -110,4 +110,9 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; } +static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From b37670b0f37d8015d0d428e6a63bd07397430a2f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:37:28 +0100 Subject: ARM: KVM: abstract S1TW abort detection away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_arm.h | 1 + arch/arm/include/asm/kvm_emulate.h | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 990764e..124623e 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -211,6 +211,7 @@ #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) +#define HSR_DABT_S1PTW (1U << 7) #define HSR_DABT_CM (1U << 8) #define HSR_DABT_EA (1U << 9) diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 2d1c585..6b43653 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -115,4 +115,9 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; } +static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From a7123377e7107a5f4f7ae198aa5b5000c9362871 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 11:43:30 +0100 Subject: ARM: KVM: abstract SAS decoding away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 
'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 6b43653..e4a4fc2 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -120,4 +120,20 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } +/* Get Access Size from a data abort */ +static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) +{ + switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { + case 0: + return 1; + case 1: + return 2; + case 2: + return 4; + default: + kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); + return -EFAULT; + } +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 23b415d61a80c0c63f43dd2b3a08a1fa0b79b8ea Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 12:07:06 +0100 Subject: ARM: KVM: abstract IL decoding away Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index e4a4fc2..a9b853f 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -136,4 +136,10 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) } } +/* This one is not specific to Data Abort */ +static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_IL; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 4926d445eb76bec8ebd71f5ed9e9c94fd738014d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 14:09:58 +0100 Subject: ARM: KVM: abstract exception class decoding away Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index a9b853f..539f83a 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -142,4 +142,9 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_IL; } +static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 1cc287dd081235e02cebd791f1e930ca6f422dcd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 18 Sep 2012 14:14:35 +0100 Subject: ARM: KVM: abstract fault decoding away Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 539f83a..021a59c 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -147,4 +147,9 @@ static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; } +static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From 52d1dba933f601d8d9e6373377377b12d6bcfac0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 Oct 2012 10:33:38 +0100 Subject: ARM: KVM: abstract HSR_EC_IABT away Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git 
a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 021a59c..e59f8c0 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -147,6 +147,11 @@ static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; } +static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; +} + static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; -- cgit v1.1 From c5997563298bc1b9da5212c15544962d4dbbe27d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Dec 2012 18:13:18 +0000 Subject: ARM: KVM: move kvm_condition_valid to emulate.c This is really hardware emulation, and as such it better be with its little friends. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index e59f8c0..e14268c 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -28,6 +28,7 @@ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); +bool kvm_condition_valid(struct kvm_vcpu *vcpu); void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); -- cgit v1.1 From 3414bbfff98b2524c0b2cdc4d0a78153ebf4f823 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 5 Oct 2012 11:11:11 +0100 Subject: ARM: KVM: move exit handler selection to a separate file The exit handler selection code cannot be shared with arm64 (two different modes, more exception classes...). Move it to a separate file (handle_exit.c). Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_host.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index eb836e6..24f457a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -183,4 +183,7 @@ struct kvm_one_reg; int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index); + #endif /* __ARM_KVM_HOST_H__ */ -- cgit v1.1 From c088f8f0088decf0ce172a6bb4a6f5742cc54c15 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Thu, 21 Feb 2013 11:26:10 -0800 Subject: KVM: ARM: Reintroduce trace_kvm_hvc This one got lost in the move to handle_exit, so let's reintroduce it using an accessor to the immediate value field like the other ones. 
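For context, a hedged sketch of the kind of call site this accessor enables (the real handler lives in handle_exit.c; this version is simplified, and the trace event is assumed to take the pc, r0 and the HVC immediate):

    static int handle_hvc_example(struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
            trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
                          kvm_vcpu_hvc_get_imm(vcpu));
            return 1;       /* handled, resume the guest */
    }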
Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index e14268c..9cb2fe1 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -158,4 +158,9 @@ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; } +static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; +} + #endif /* __ARM_KVM_EMULATE_H__ */ -- cgit v1.1 From c62ee2b22798d7a5a8eb5f799b5183ef993ca7e4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 Oct 2012 11:27:37 +0100 Subject: ARM: KVM: abstract most MMU operations Move low level MMU-related operations to kvm_mmu.h. This makes the MMU code reusable by the arm64 port. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 58 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 421a20b..ac78493 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -19,6 +19,9 @@ #ifndef __ARM_KVM_MMU_H__ #define __ARM_KVM_MMU_H__ +#include +#include + int create_hyp_mappings(void *from, void *to); int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_hyp_pmds(void); @@ -36,6 +39,16 @@ phys_addr_t kvm_mmu_get_httbr(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); +static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) +{ + pte_val(*pte) = new_pte; + /* + * flush_pmd_entry just takes a void pointer and cleans the necessary + * cache entries, so we can reuse the function for ptes. + */ + flush_pmd_entry(pte); +} + static inline bool kvm_is_write_fault(unsigned long hsr) { unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; @@ -47,4 +60,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr) return true; } +static inline void kvm_clean_pgd(pgd_t *pgd) +{ + clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +} + +static inline void kvm_clean_pmd_entry(pmd_t *pmd) +{ + clean_pmd_entry(pmd); +} + +static inline void kvm_clean_pte(pte_t *pte) +{ + clean_pte_table(pte); +} + +static inline void kvm_set_s2pte_writable(pte_t *pte) +{ + pte_val(*pte) |= L_PTE_S2_RDWR; +} + +struct kvm; + +static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +{ + /* + * If we are going to insert an instruction page and the icache is + * either VIPT or PIPT, there is a potential problem where the host + * (or another VM) may have used the same page as this guest, and we + * read incorrect data from the icache. If we're using a PIPT cache, + * we can invalidate just that page, but if we are using a VIPT cache + * we need to invalidate the entire icache - damn shame - as written + * in the ARM ARM (DDI 0406C.b - Page B3-1393). + * + * VIVT caches are tagged using both the ASID and the VMID and doesn't + * need any kind of flushing (DDI 0406C.b - Page B3-1392). 
+ */ + if (icache_is_pipt()) { + unsigned long hva = gfn_to_hva(kvm, gfn); + __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); + } else if (!icache_is_vivt_asid_tagged()) { + /* any kind of VIPT cache */ + __flush_icache_all(); + } +} + #endif /* __ARM_KVM_MMU_H__ */ -- cgit v1.1 From 629dc446a49a1c3b4f31a3fdc2c814a7db6c6199 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 Oct 2012 14:29:47 +0100 Subject: ARM: KVM: remove superfluous include from kvm_vgic.h Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_vgic.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h index ab97207..343744e 100644 --- a/arch/arm/include/asm/kvm_vgic.h +++ b/arch/arm/include/asm/kvm_vgic.h @@ -21,7 +21,6 @@ #include #include -#include #include #include #include -- cgit v1.1 From e7858c58d52237a4519e2fdb1ce8f2d9805ce0ce Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 5 Oct 2012 15:10:44 +0100 Subject: ARM: KVM: move hyp init to kvm_host.h Make the split of the pgd_ptr an implementation specific thing by moving the init call to an inline function. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 24f457a..f00a557 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -186,4 +186,23 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index); +static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr, + unsigned long hyp_stack_ptr, + unsigned long vector_ptr) +{ + unsigned long pgd_low, pgd_high; + + pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); + pgd_high = (pgd_ptr >> 32ULL); + + /* + * Call initialization code, and switch to the full blown + * HYP code. The init code doesn't need to preserve these registers as + * r1-r3 and r12 are already callee save according to the AAPCS. + * Note that we slightly misuse the prototype by casing the pgd_low to + * a void *. + */ + kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); +} + #endif /* __ARM_KVM_HOST_H__ */ -- cgit v1.1 From 9c7a6432fb081563f084b25bbd2774b1547c4fad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 27 Oct 2012 18:23:25 +0100 Subject: ARM: KVM: use kvm_kernel_vfp_t as an abstract type for VFP containers In order to keep the VFP allocation code common, use an abstract type for the VFP containers. Maps onto struct vfp_hard_struct on ARM. 
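A minimal sketch of the shared allocation code the abstract type permits (alloc_vfp_host_example() is invented; only the typedef comes from the patch) — nothing here names struct vfp_hard_struct, so the same lines would build on arm64 with a different typedef behind kvm_kernel_vfp_t:

    #include <linux/slab.h>

    static kvm_kernel_vfp_t *alloc_vfp_host_example(void)
    {
            /* Arch-neutral: sizeof() resolves per-arch via the typedef. */
            return kzalloc(sizeof(kvm_kernel_vfp_t), GFP_KERNEL);
    }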
Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f00a557..0c4e643 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -87,6 +87,8 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; +typedef struct vfp_hard_struct kvm_kernel_vfp_t; + struct kvm_vcpu_arch { struct kvm_regs regs; @@ -103,8 +105,8 @@ struct kvm_vcpu_arch { struct kvm_vcpu_fault_info fault; /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - struct vfp_hard_struct *vfp_host; + kvm_kernel_vfp_t vfp_guest; + kvm_kernel_vfp_t *vfp_host; /* VGIC state */ struct vgic_cpu vgic_cpu; -- cgit v1.1 From 06e8c3b0f3210e5e7039fd2b5e3926b68df7f5d7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 28 Oct 2012 01:09:14 +0100 Subject: ARM: KVM: allow HYP mappings to be at an offset from kernel mappings arm64 cannot represent the kernel VAs in HYP mode, because of the lack of TTBR1 at EL2. A way to cope with this situation is to have HYP VAs to be an offset from the kernel VAs. Introduce macros to convert a kernel VA to a HYP VA, make the HYP mapping functions use these conversion macros. Also change the documentation to reflect the existence of the offset. On ARM, where we can have an identity mapping between kernel and HYP, the macros are without any effect. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index ac78493..3c71a1d 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -22,6 +22,14 @@ #include #include +/* + * We directly use the kernel VA for the HYP, as we can directly share + * the mapping (HTTBR "covers" TTBR1). + */ +#define HYP_PAGE_OFFSET_MASK (~0UL) +#define HYP_PAGE_OFFSET PAGE_OFFSET +#define KERN_TO_HYP(kva) (kva) + int create_hyp_mappings(void *from, void *to); int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_hyp_pmds(void); -- cgit v1.1 From 06fe0b73ff17e5d777af1b26f3e227d79c0d6808 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 17 Dec 2012 11:57:24 +0000 Subject: ARM: KVM: move include of asm/idmap.h to kvm_mmu.h Since the arm64 code doesn't have a global asm/idmap.h file, move the inclusion to asm/kvm_mmu.h. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 3c71a1d..970f3b5 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -21,6 +21,7 @@ #include #include +#include /* * We directly use the kernel VA for the HYP, as we can directly share -- cgit v1.1 From 48762767e1c150d58c250650f8202b7d4ad65ec4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 28 Jan 2013 15:27:00 +0000 Subject: ARM: KVM: change kvm_tlb_flush_vmid to kvm_tlb_flush_vmid_ipa v8 is capable of invalidating Stage-2 by IPA, but v7 is not. Change kvm_tlb_flush_vmid() to take an IPA parameter, which is then ignored by the invalidation code (and nuke the whole TLB as it always did). This allows v8 to implement a more optimized strategy. 
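A hedged sketch of a caller under the widened interface (stage2_unmap_one_example() is invented; kvm_call_hyp() is the existing HYP call helper): v7 accepts the ipa argument but still nukes the whole TLB, while v8 can use it for a targeted invalidation:

    static void stage2_unmap_one_example(struct kvm *kvm, phys_addr_t ipa)
    {
            /* ...the stage-2 PTE for ipa is cleared here... */
            kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
    }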
Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index e4956f4..18d5032 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[]; extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern void __kvm_flush_vm_context(void); -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); +extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); #endif -- cgit v1.1 From 6190920a35068a621570cca7f07f3c4477615176 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 5 Oct 2012 13:42:45 +0100 Subject: ARM: KVM: move kvm_handle_wfi to handle_exit.c It has little to do in emulate.c these days... Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 9cb2fe1..82b4bab 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -27,7 +27,6 @@ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); bool kvm_condition_valid(struct kvm_vcpu *vcpu); void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); void kvm_inject_undefined(struct kvm_vcpu *vcpu); -- cgit v1.1 From da4a686a2cfb077a8bfc1697f597e7f86235b822 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 6 Feb 2013 21:17:47 -0600 Subject: ARM: smp_twd: convert to use CLKSRC_OF init Now that we have OF based init with CLKSRC_OF, convert smp_twd init function to use it and covert all callers of twd_local_timer_of_register. Signed-off-by: Rob Herring Cc: Shawn Guo Cc: Sascha Hauer Cc: Russell King Cc: Viresh Kumar Cc: Shiraz Hashim Cc: Srinidhi Kasagar Cc: John Stultz Cc: Thomas Gleixner Cc: linux-omap@vger.kernel.org Cc: spear-devel@list.st.com Reviewed-by: Stephen Warren Acked-by: Santosh Shilimkar Acked-by: Tony Lindgren Acked-by: Linus Walleij --- arch/arm/include/asm/smp_twd.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index 0f01f46..7b2899c 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -34,12 +34,4 @@ struct twd_local_timer name __initdata = { \ int twd_local_timer_register(struct twd_local_timer *); -#ifdef CONFIG_HAVE_ARM_TWD -void twd_local_timer_of_register(void); -#else -static inline void twd_local_timer_of_register(void) -{ -} -#endif - #endif -- cgit v1.1 From 4cc3daaf3976bde5345718e86b67cace2f935a09 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Jan 2013 20:48:55 +0000 Subject: ARM: tlbflush: remove ARMv3 support We no longer support any ARMv3 platforms, so remove the old tlbflushing code. 
Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c88..a223003 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -52,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); -- cgit v1.1 From 82d9b0d0c6c4cbd5d0022d7df5e6bf071a9eb6c7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Jan 2013 12:07:40 +0000 Subject: ARM: cache: remove ARMv3 support code This is only used by 740t, which is a v4 core and (by my reading of the datasheet for the CPU) ignores CRm for the cp15 cache flush operation, making the v4 cache implementation in cache-v4.S sufficient for this CPU. Tested with 740T core-tile on Integrator/AP baseboard. Acked-by: Hyok S. 
Choi Acked-by: Greg Ungerer Signed-off-by: Will Deacon --- arch/arm/include/asm/glue-cache.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15..ea289e1 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 -- cgit v1.1 From f36a3bb1a1b73a60f4df1d4c38c686cc173f50b3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 18 Jan 2013 15:20:06 +0000 Subject: arm: Move the set_handle_irq and handle_arch_irq declarations to asm/irq.h This patch prepares the removal of include in the GIC and VIC irqchip drivers. Signed-off-by: Catalin Marinas Tested-by: Marc Zyngier Cc: Russell King Cc: Thomas Gleixner Cc: Rob Herring --- arch/arm/include/asm/irq.h | 5 +++++ arch/arm/include/asm/mach/irq.h | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 35c21c3..53c15de 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); void init_IRQ(void); +#ifdef CONFIG_MULTI_IRQ_HANDLER +extern void (*handle_arch_irq)(struct pt_regs *); +extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +#endif + #endif #endif diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 18c8830..749d505 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -20,11 +20,6 @@ struct seq_file; extern void init_FIQ(int); extern int show_fiq_list(struct seq_file *, int); -#ifdef CONFIG_MULTI_IRQ_HANDLER -extern void (*handle_arch_irq)(struct pt_regs *); -extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); -#endif - /* * This is for easy migration, but should be changed in the source */ -- cgit v1.1 From de88cbb7b244f3bcd61d49fd6dec35c19192545a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 18 Jan 2013 15:31:37 +0000 Subject: arm: Move chained_irq_(enter|exit) to a generic file These functions have been introduced by commit 10a8c383 (irq: introduce entry and exit functions for chained handlers) in asm/mach/irq.h. This patch moves them to linux/irqchip/chained_irq.h so that generic irqchip drivers do not rely on architecture specific header files. Signed-off-by: Catalin Marinas Tested-by: Marc Zyngier Cc: Russell King Cc: Thomas Gleixner Cc: Rob Herring --- arch/arm/include/asm/mach/irq.h | 31 ------------------------------- 1 file changed, 31 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 749d505..2092ee1 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -30,35 +30,4 @@ do { \ raw_spin_unlock(&desc->lock); \ } while(0) -#ifndef __ASSEMBLY__ -/* - * Entry/exit functions for chained handlers where the primary IRQ chip - * may implement either fasteoi or level-trigger flow control. - */ -static inline void chained_irq_enter(struct irq_chip *chip, - struct irq_desc *desc) -{ - /* FastEOI controllers require no action on entry. 
*/ - if (chip->irq_eoi) - return; - - if (chip->irq_mask_ack) { - chip->irq_mask_ack(&desc->irq_data); - } else { - chip->irq_mask(&desc->irq_data); - if (chip->irq_ack) - chip->irq_ack(&desc->irq_data); - } -} - -static inline void chained_irq_exit(struct irq_chip *chip, - struct irq_desc *desc) -{ - if (chip->irq_eoi) - chip->irq_eoi(&desc->irq_data); - else - chip->irq_unmask(&desc->irq_data); -} -#endif - #endif -- cgit v1.1 From 93dc68876b608da041fe40ed39424b0fcd5aa2fb Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 26 Mar 2013 23:35:04 +0100 Subject: ARM: 7684/1: errata: Workaround for Cortex-A15 erratum 798181 (TLBI/DSB operations) On Cortex-A15 (r0p0..r3p2) the TLBI/DSB are not adequately shooting down all use of the old entries. This patch implements the erratum workaround which consists of: 1. Dummy TLBIMVAIS and DSB on the CPU doing the TLBI operation. 2. Send IPI to the CPUs that are running the same mm (and ASID) as the one being invalidated (or all the online CPUs for global pages). 3. CPU receiving the IPI executes a DMB and CLREX (part of the exception return code already). Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/highmem.h | 7 +++++++ arch/arm/include/asm/mmu_context.h | 2 ++ arch/arm/include/asm/tlbflush.h | 15 +++++++++++++++ 3 files changed, 24 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828..91b99ab 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a661..a7b85e0 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c88..9e9c041 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * -- cgit v1.1 From 6f3d90e55660ba42301b5e9c7eed332cc9f70fd7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 28 Mar 2013 11:17:55 +0100 Subject: ARM: 7685/1: delay: use private ticks_per_jiffy field for timer-based delay ops Commit 70264367a243 ("ARM: 7653/2: do not scale loops_per_jiffy when using a constant delay clock") fixed a problem with our timer-based delay loop, where loops_per_jiffy is scaled by cpufreq yet used directly by the timer delay ops. 
This patch fixes the problem in a more elegant way by keeping a private ticks_per_jiffy field in the delay ops, independent of loops_per_jiffy and therefore not subject to scaling. The loop-based delay continues to use loops_per_jiffy directly, as it should. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/delay.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799f..dff714d 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) -- cgit v1.1 From 029baf14a027a44b3ac8a9fe5cb1e516cbb9007e Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 26 Mar 2013 18:14:24 +0100 Subject: ARM: 7683/1: pci: add a align_resource hook The PCI specification says that an I/O region must be aligned on a 4 KB boundary, and a memory region aligned on a 1 MB boundary. However, the Marvell PCIe interfaces rely on address decoding windows (which allow associating a range of physical addresses with a given device). For PCIe memory windows, those windows are defined with a 1 MB granularity (which matches the PCI specs), but PCIe I/O windows can only be defined with a 64 KB granularity, so they have to be 64 KB aligned. We therefore need to tell the PCI core about this special alignment requirement. The PCI core already calls pcibios_align_resource() in the ARM PCI core, specifically for such purposes. So this patch extends the ARM PCI core so that it calls a ->align_resource() hook registered by the PCI driver, exactly like the existing ->map_irq() and ->swizzle() hooks. A particular PCI driver can register an align_resource() hook and do its own specific alignment, depending on the constraints of the underlying hardware.
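To show how a host driver would hook in, a hedged sketch (the driver and function names are invented; the align_resource member of struct hw_pci is the one added by the diff below, and the 64 KB I/O alignment mirrors the Marvell constraint described above):

    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/sizes.h>
    #include <asm/mach/pci.h>

    static resource_size_t example_align_resource(struct pci_dev *dev,
                                                  const struct resource *res,
                                                  resource_size_t start,
                                                  resource_size_t size,
                                                  resource_size_t align)
    {
            if (res->flags & IORESOURCE_IO)
                    return round_up(start, SZ_64K); /* 64 KB I/O windows */
            return start;
    }

    static struct hw_pci example_pci __initdata = {
            /* .nr_controllers, .setup, .map_irq, ... */
            .align_resource = example_align_resource,
    };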
Signed-off-by: Thomas Petazzoni Signed-off-by: Russell King --- arch/arm/include/asm/mach/pci.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 5cf2e97..7d2c3c8 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -30,6 +30,11 @@ struct hw_pci { void (*postinit)(void); u8 (*swizzle)(struct pci_dev *dev, u8 *pin); int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); }; /* @@ -51,6 +56,12 @@ struct pci_sys_data { u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ int (*map_irq)(const struct pci_dev *, u8, u8); + /* Resource alignement requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); void *private_data; /* platform controller private data */ }; -- cgit v1.1 From 4fd75911f6970478444eece952dacb2db9d1e5a5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 28 Mar 2013 11:25:03 +0100 Subject: ARM: 7687/1: atomics: don't use exclusives for atomic64 read/set with LPAE To ease page table updates with 64-bit descriptors, CPUs implementing LPAE are required to implement ldrd/strd as atomic operations. This patch uses these accessors instead of the exclusive variants when performing atomic64_{read,set} on LPAE systems. Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/atomic.h | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index c79f61f..da1c77d 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -243,6 +243,29 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } +#ifdef CONFIG_ARM_LPAE +static inline u64 atomic64_read(const atomic64_t *v) +{ + u64 result; + + __asm__ __volatile__("@ atomic64_read\n" +" ldrd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter), "Qo" (v->counter) + ); + + return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ + __asm__ __volatile__("@ atomic64_set\n" +" strd %2, %H2, [%1]" + : "=Qo" (v->counter) + : "r" (&v->counter), "r" (i) + ); +} +#else static inline u64 atomic64_read(const atomic64_t *v) { u64 result; @@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i) : "r" (&v->counter), "r" (i) : "cc"); } +#endif static inline void atomic64_add(u64 i, atomic64_t *v) { -- cgit v1.1 From b00884802043d9102ecc2abfdc37a7b35b30e52a Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Thu, 28 Mar 2013 22:54:40 +0100 Subject: ARM: 7688/1: add support for context tracking subsystem commit 91d1aa43 (context_tracking: New context tracking subsystem) generalized parts of the RCU userspace extended quiescent state into the context tracking subsystem. Context tracking is then used to implement adaptive tickless (a.k.a. extended nohz). To support the new context tracking subsystem on ARM, the user/kernel boundary transitions need to be instrumented. For exceptions and IRQs in usermode, the existing usr_entry macro is used to instrument the user->kernel transition. For the return to usermode path, the ret_to_user* path is instrumented.
Using the usr_entry macro, this covers interrupts in userspace, data abort and prefetch abort exceptions in userspace as well as undefined exceptions in userspace (which is where FP emulation and VFP are handled.) For syscalls, the slow return path is covered by instrumenting the ret_to_user path. In addition, the syscall entry point is instrumented which covers the user->kernel transition for both fast and slow syscalls, and an additional instrumentation point is added for the fast syscall return path (ret_fast_syscall). Cc: Mats Liljegren Cc: Frederic Weisbecker Signed-off-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index cddda1f..1995d1a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ +#define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -- cgit v1.1 From ae8a8b9553bd3906af74ff4e8d763904d20ab4e5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 3 Apr 2013 17:16:57 +0100 Subject: ARM: 7691/1: mm: kill unused TLB_CAN_READ_FROM_L1_CACHE and use ALT_SMP instead Many ARMv7 cores have hardware page table walkers that can read the L1 cache. This is discoverable from the ID_MMFR3 register, although this can be expensive to access from the low-level set_pte functions and is a pain to cache, particularly with multi-cluster systems. A useful observation is that the multi-processing extensions for ARMv7 require coherent table walks, meaning that we can make use of ALT_SMP patching in proc-v7-* to patch away the cache flush safely for these cores. Reported-by: Albin Tonnerre Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/tlbflush.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c88..7a3e48d 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -169,7 +169,7 @@ # define v6wbi_always_flags (-1UL) #endif -#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ TLB_V7_UIS_ASID | TLB_V7_UIS_BP) #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ -- cgit v1.1 From f7b861b7a6d9d1838cbbb5f4053e61578b86d134 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Mar 2013 22:49:38 +0100 Subject: arm: Use generic idle loop Use the generic idle loop and replace enable/disable_hlt with the respective core functions. Signed-off-by: Thomas Gleixner Cc: Linus Torvalds Cc: Rusty Russell Cc: Paul McKenney Cc: Peter Zijlstra Reviewed-by: Cc: Srivatsa S. 
Bhat Cc: Magnus Damm Cc: Russell King Tested-by: Kevin Hilman # OMAP Link: http://lkml.kernel.org/r/20130321215233.826238797@linutronix.de --- arch/arm/include/asm/system_misc.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 5a85f14..21a23e3 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void); extern unsigned int user_debug; -extern void disable_hlt(void); -extern void enable_hlt(void); - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_ARM_SYSTEM_MISC_H */ -- cgit v1.1 From 7366b92a77fc00357294819bb495896d7482f30c Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Tue, 11 Dec 2012 13:58:43 +0900 Subject: ARM: Add interface for registering and calling firmware-specific operations Some boards run with secure firmware in the TrustZone secure world, which changes the way some things have to be initialized. This patch adds an interface for platforms to specify available firmware operations and call them. A wrapper macro, call_firmware_op(), checks if the operation is provided and calls it if so, otherwise returns -ENOSYS to allow fallback to the legacy operation. By default no operations are provided. Example of use: In code using firmware ops: __raw_writel(virt_to_phys(exynos4_secondary_startup), CPU1_BOOT_REG); /* Call Exynos specific smc call */ if (call_firmware_op(cpu_boot, cpu) == -ENOSYS) cpu_boot_legacy(...); /* Try legacy way */ gic_raise_softirq(cpumask_of(cpu), 1); In board-/platform-specific code: static int platformX_do_idle(void) { /* tell platformX firmware to enter idle */ return 0; } static int platformX_cpu_boot(int i) { /* tell platformX firmware to boot CPU i */ return 0; } static const struct firmware_ops platformX_firmware_ops = { .do_idle = platformX_do_idle, .cpu_boot = platformX_cpu_boot, /* other operations not available on platformX */ }; static void __init board_init_early(void) { register_firmware_ops(&platformX_firmware_ops); } Signed-off-by: Kyungmin Park Signed-off-by: Tomasz Figa Signed-off-by: Kukjin Kim --- arch/arm/include/asm/firmware.h | 66 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 arch/arm/include/asm/firmware.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h new file mode 100644 index 0000000..1563130 --- /dev/null +++ b/arch/arm/include/asm/firmware.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2012 Samsung Electronics. + * Kyungmin Park + * Tomasz Figa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_FIRMWARE_H +#define __ASM_ARM_FIRMWARE_H + +#include + +/* + * struct firmware_ops + * + * A structure to specify available firmware operations. + * + * A filled up structure can be registered with register_firmware_ops(). + */ +struct firmware_ops { + /* + * Enters CPU idle mode + */ + int (*do_idle)(void); + /* + * Sets boot address of specified physical CPU + */ + int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr); + /* + * Boots specified physical CPU + */ + int (*cpu_boot)(int cpu); + /* + * Initializes L2 cache + */ + int (*l2x0_init)(void); +}; + +/* Global pointer for current firmware_ops structure, can't be NULL.
*/ +extern const struct firmware_ops *firmware_ops; + +/* + * call_firmware_op(op, ...) + * + * Checks if firmware operation is present and calls it, + * otherwise returns -ENOSYS + */ +#define call_firmware_op(op, ...) \ ((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS)) + +/* + * register_firmware_ops(ops) + * + * A function to register platform firmware_ops struct. + */ +static inline void register_firmware_ops(const struct firmware_ops *ops) +{ + BUG_ON(!ops); + + firmware_ops = ops; +} + +#endif -- cgit v1.1 From 865499ea90d399e0682bcce3ae7af24277633699 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 12 Apr 2013 14:00:16 +0100 Subject: ARM: KVM: fix L_PTE_S2_RDWR to actually be Read/Write Looks like our L_PTE_S2_RDWR definition is slightly wrong, and is actually write only (see ARM ARM Table B3-9, Stage 2 control of access permissions). Didn't make a difference for normal pages, as we OR the flags together, but I'm still wondering how it worked for Stage-2 mapped devices, such as the GIC. Brown paper bag time, again. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/pgtable-3level.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd..86b8fe3 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. -- cgit v1.1 From f5d6a1441a5045824f36ff7c6b6bbae0373472a6 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 3 Apr 2013 22:28:41 +0100 Subject: ARM: 7692/1: iop3xx: move IOP3XX_PERIPHERAL_VIRT_BASE Currently IOP3XX_PERIPHERAL_VIRT_BASE conflicts with PCI_IO_VIRT_BASE:

                                address     size
  PCI_IO_VIRT_BASE              0xfee00000  0x200000
  IOP3XX_PERIPHERAL_VIRT_BASE   0xfeffe000  0x2000

Fix by moving IOP3XX_PERIPHERAL_VIRT_BASE below PCI_IO_VIRT_BASE. The patch fixes the following kernel panic with 3.9-rc1 on iop3xx boards: [ 0.000000] Booting Linux on physical CPU 0x0 [ 0.000000] Initializing cgroup subsys cpu [ 0.000000] Linux version 3.9.0-rc1-iop32x (aaro@blackmetal) (gcc version 4.7.2 (GCC) ) #20 PREEMPT Tue Mar 5 16:44:36 EET 2013 [ 0.000000] bootconsole [earlycon0] enabled [ 0.000000] ------------[ cut here ]------------ [ 0.000000] kernel BUG at mm/vmalloc.c:1145!
[ 0.000000] Internal error: Oops - BUG: 0 [#1] PREEMPT ARM [ 0.000000] Modules linked in: [ 0.000000] CPU: 0 Not tainted (3.9.0-rc1-iop32x #20) [ 0.000000] PC is at vm_area_add_early+0x4c/0x88 [ 0.000000] LR is at add_static_vm_early+0x14/0x68 [ 0.000000] pc : [] lr : [] psr: 800000d3 [ 0.000000] sp : c03ffee4 ip : dfffdf88 fp : c03ffef4 [ 0.000000] r10: 00000002 r9 : 000000cf r8 : 00000653 [ 0.000000] r7 : c040eca8 r6 : c03e2408 r5 : dfffdf60 r4 : 00200000 [ 0.000000] r3 : dfffdfd8 r2 : feffe000 r1 : ff000000 r0 : dfffdf60 [ 0.000000] Flags: Nzcv IRQs off FIQs off Mode SVC_32 ISA ARM Segment kernel [ 0.000000] Control: 0000397f Table: a0004000 DAC: 00000017 [ 0.000000] Process swapper (pid: 0, stack limit = 0xc03fe1b8) [ 0.000000] Stack: (0xc03ffee4 to 0xc0400000) [ 0.000000] fee0: 00200000 c03fff0c c03ffef8 c03e1c40 c03e7468 00200000 fee00000 [ 0.000000] ff00: c03fff2c c03fff10 c03e23e4 c03e1c38 feffe000 c0408ee4 ff000000 c0408f04 [ 0.000000] ff20: c03fff3c c03fff30 c03e2434 c03e23b4 c03fff84 c03fff40 c03e2c94 c03e2414 [ 0.000000] ff40: c03f8878 c03f6410 ffff0000 000bffff 00001000 00000008 c03fff84 c03f6410 [ 0.000000] ff60: c04227e8 c03fffd4 a0008000 c03f8878 69052e30 c02f96eb c03fffbc c03fff88 [ 0.000000] ff80: c03e044c c03e268c 00000000 0000397f c0385130 00000001 ffffffff c03f8874 [ 0.000000] ffa0: dfffffff a0004000 69052e30 a03f61a0 c03ffff4 c03fffc0 c03dd5cc c03e0184 [ 0.000000] ffc0: 00000000 00000000 00000000 00000000 00000000 c03f8878 0000397d c040601c [ 0.000000] ffe0: c03f8874 c0408674 00000000 c03ffff8 a0008040 c03dd558 00000000 00000000 [ 0.000000] Backtrace: [ 0.000000] [] (vm_area_add_early+0x0/0x88) from [] (add_static_vm_early+0x14/0x68) Tested-by: Mikael Pettersson Signed-off-by: Aaro Koskinen Signed-off-by: Russell King --- arch/arm/include/asm/hardware/iop3xx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fb..ed94b1a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) -- cgit v1.1 From 0c91e7e07ebf08092bf8e28d8cd8d420732fc716 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 23 Apr 2013 16:45:40 -0400 Subject: ARM: cacheflush: add synchronization helpers for mixed cache state accesses Algorithms used by the MCPM layer rely on state variables which are accessed while the cache is either active or inactive, depending on the code path and the active state. This patch introduces generic cache maintenance helpers to provide the necessary cache synchronization for such state variables to always hit main memory in an ordered way. 
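As a minimal illustrative sketch (an editorial addition, not part of the original patch): a state variable is padded to its own cache line with __CACHE_WRITEBACK_GRANULE, cleaned out to main memory after a cached write with sync_cache_w(), and flushed before a read with sync_cache_r(). The variable and function names here are hypothetical.

#include <asm/cacheflush.h>

static int mcpm_state __aligned(__CACHE_WRITEBACK_GRANULE);

static void publish_state(int new_state)
{
	mcpm_state = new_state;
	sync_cache_w(&mcpm_state);	/* clean inner and outer caches to RAM */
}

static int observe_state(void)
{
	sync_cache_r(&mcpm_state);	/* clean+invalidate so the read hits RAM */
	return mcpm_state;
}
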
Signed-off-by: Nicolas Pitre Acked-by: Russell King Acked-by: Dave Martin --- arch/arm/include/asm/cacheflush.h | 75 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e1489c5..bff7138 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) flush_cache_all(); } +/* + * Memory synchronization helpers for mixed cached vs non cached accesses. + * + * Some synchronization algorithms have to set states in memory with the + * cache enabled or disabled depending on the code path. It is crucial + * to always ensure proper cache maintenance to update main memory right + * away in that case. + * + * Any cached write must be followed by a cache clean operation. + * Any cached read must be preceded by a cache invalidate operation. + * Yet, in the read case, a cache flush i.e. atomic clean+invalidate + * operation is needed to avoid discarding possible concurrent writes to the + * accessed memory. + * + * Also, in order to prevent a cached writer from interfering with an + * adjacent non-cached writer, each state variable must be located to + * a separate cache line. + */ + +/* + * This needs to be >= the max cache writeback size of all + * supported platforms included in the current kernel configuration. + * This is used to align state variables to their own cache lines. + */ +#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */ +#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER) + +/* + * There is no __cpuc_clean_dcache_area but we use it anyway for + * code intent clarity, and alias it to __cpuc_flush_dcache_area. + */ +#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area + +/* + * Ensure preceding writes to *p by this CPU are visible to + * subsequent reads by other CPUs: + */ +static inline void __sync_cache_range_w(volatile void *p, size_t size) +{ + char *_p = (char *)p; + + __cpuc_clean_dcache_area(_p, size); + outer_clean_range(__pa(_p), __pa(_p + size)); +} + +/* + * Ensure preceding writes to *p by other CPUs are visible to + * subsequent reads by this CPU. We must be careful not to + * discard data simultaneously written by another CPU, hence the + * usage of flush rather than invalidate operations. + */ +static inline void __sync_cache_range_r(volatile void *p, size_t size) +{ + char *_p = (char *)p; + +#ifdef CONFIG_OUTER_CACHE + if (outer_cache.flush_range) { + /* + * Ensure dirty data migrated from other CPUs into our cache + * are cleaned out safely before the outer cache is cleaned: + */ + __cpuc_clean_dcache_area(_p, size); + + /* Clean and invalidate stale data for *p from outer ... */ + outer_flush_range(__pa(_p), __pa(_p + size)); + } +#endif + + /* ... and inner cache: */ + __cpuc_flush_dcache_area(_p, size); +} + +#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) +#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) + #endif -- cgit v1.1 From e8db288e05e588ad3f416b3a24354d60d02f35f2 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 12 Apr 2012 02:45:22 -0400 Subject: ARM: multi-cluster PM: secondary kernel entry code CPUs in cluster based systems, such as big.LITTLE, have special needs when entering the kernel due to a hotplug event, or when resuming from a deep sleep mode. 
This entry code is vectorized so multiple CPUs can enter the kernel in parallel without serialization. The mcpm prefix stands for "multi cluster power management", however this is usable on single cluster systems as well. Only the basic structure is introduced here. This will be extended with later patches. In order not to make things more complex than they currently have to be, the planned work to make runtime adjusted MPIDR based indexing and dynamic memory allocation for cluster states is postponed to a later cycle. The MAX_NR_CLUSTERS and MAX_CPUS_PER_CLUSTER static definitions should be sufficient for those systems expected to be available in the near future. Signed-off-by: Nicolas Pitre Reviewed-by: Santosh Shilimkar Reviewed-by: Will Deacon --- arch/arm/include/asm/mcpm.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 arch/arm/include/asm/mcpm.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h new file mode 100644 index 0000000..470a417 --- /dev/null +++ b/arch/arm/include/asm/mcpm.h @@ -0,0 +1,42 @@ +/* + * arch/arm/include/asm/mcpm.h + * + * Created by: Nicolas Pitre, April 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MCPM_H +#define MCPM_H + +/* + * Maximum number of possible clusters / CPUs per cluster. + * + * This should be sufficient for quite a while, while keeping the + * (assembly) code simpler. When this starts to grow then we'll have + * to consider dynamic allocation. + */ +#define MAX_CPUS_PER_CLUSTER 4 +#define MAX_NR_CLUSTERS 2 + +#ifndef __ASSEMBLY__ + +/* + * Platform specific code should use this symbol to set up secondary + * entry location for processors to use when released from reset. + */ +extern void mcpm_entry_point(void); + +/* + * This is used to indicate where the given CPU from given cluster should + * branch once it is ready to re-enter the kernel using ptr, or NULL if it + * should be gated. A gated CPU is held in a WFE loop until its vector + * becomes non NULL. + */ +void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); + +#endif /* ! __ASSEMBLY__ */ #endif -- cgit v1.1 From 7c2b860534d02d11923dd0504b961f21508173f1 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 20 Sep 2012 16:05:37 -0400 Subject: ARM: mcpm: introduce the CPU/cluster power API This is the basic API used to handle the powering up/down of individual CPUs in a (multi-)cluster system. The platform specific backend implementation has the responsibility to handle the cluster level power as well when the first/last CPU in a cluster is brought up/down. Signed-off-by: Nicolas Pitre Reviewed-by: Santosh Shilimkar Reviewed-by: Will Deacon --- arch/arm/include/asm/mcpm.h | 92 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 470a417..627761f 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -38,5 +38,97 @@ extern void mcpm_entry_point(void); */ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); +/* + * CPU/cluster power operations API for higher subsystems to use.
+ */ + +/** + * mcpm_cpu_power_up - make given CPU in given cluster runnable + * + * @cpu: CPU number within given cluster + * @cluster: cluster number for the CPU + * + * The identified CPU is brought out of reset. If the cluster was powered + * down then it is brought up as well, taking care not to let the other CPUs + * in the cluster run, and ensuring appropriate cluster setup. + * + * Caller must ensure the appropriate entry vector is initialized with + * mcpm_set_entry_vector() prior to calling this. + * + * This must be called in a sleepable context. However, the implementation + * is strongly encouraged to return early and let the operation happen + * asynchronously, especially when significant delays are expected. + * + * If the operation cannot be performed then an error code is returned. + */ +int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); + +/** + * mcpm_cpu_power_down - power the calling CPU down + * + * The calling CPU is powered down. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster is prepared for power-down too. + * + * This must be called with interrupts disabled. + * + * This does not return. Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_power_down(void); + +/** + * mcpm_cpu_suspend - bring the calling CPU into a suspended state + * + * @expected_residency: duration in microseconds the CPU is expected + * to remain suspended, or 0 if unknown/infinity. + * + * The calling CPU is suspended. The expected residency argument is used + * as a hint by the platform specific backend to implement the appropriate + * sleep state level according to the knowledge it has on wake-up latency + * for the given hardware. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster may be prepared for power-down too, if the expected + * residency makes it worthwhile. + * + * This must be called with interrupts disabled. + * + * This does not return. Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_suspend(u64 expected_residency); + +/** + * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up + * + * This lets the platform specific backend code perform needed housekeeping + * work. This must be called by the newly activated CPU as soon as it is + * fully operational in kernel space, before it enables interrupts. + * + * If the operation cannot be performed then an error code is returned. + */ +int mcpm_cpu_powered_up(void); + +/* + * Platform specific methods used in the implementation of the above API. + */ +struct mcpm_platform_ops { + int (*power_up)(unsigned int cpu, unsigned int cluster); + void (*power_down)(void); + void (*suspend)(u64); + void (*powered_up)(void); +}; + +/** + * mcpm_platform_register - register platform specific power methods + * + * @ops: mcpm_platform_ops structure to register + * + * An error is returned if the registration has been done previously. + */ +int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); + #endif /* !
__ASSEMBLY__ */ #endif -- cgit v1.1 From 7fe31d28e839f9565c8176ec584676a045970802 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 17 Jul 2012 14:25:42 +0100 Subject: ARM: mcpm: introduce helpers for platform coherency exit/setup This provides helper methods to coordinate between CPUs coming down and CPUs going up, as well as documentation on the used algorithms, so that cluster teardown and setup operations are not done for a cluster simultaneously. For use in the power_down() implementation: * __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) * __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster) * __mcpm_outbound_leave_critical(unsigned int cluster, int state) * __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) The power_up_setup() helper should do platform-specific setup in preparation for turning the CPU on, such as invalidating local caches or entering coherency. It must be assembler for now, since it must run before the MMU can be switched on. It is passed the affinity level for which initialization should be performed. Because the mcpm_sync_struct content is looked up and modified with the cache enabled or disabled depending on the code path, it is crucial to always ensure proper cache maintenance to update main memory right away. The sync_cache_*() helpers are used to that end. Also, in order to prevent a cached writer from interfering with an adjacent non-cached writer, we ensure each state variable is located to a separate cache line. Thanks to Nicolas Pitre and Achin Gupta for the help with this patch. Signed-off-by: Dave Martin Signed-off-by: Nicolas Pitre Reviewed-by: Will Deacon --- arch/arm/include/asm/mcpm.h | 73 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 627761f..3046e90 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -24,6 +24,9 @@ #ifndef __ASSEMBLY__ +#include <linux/types.h> +#include <asm/cacheflush.h> + /* * Platform specific code should use this symbol to set up secondary * entry location for processors to use when released from reset. @@ -130,5 +133,75 @@ struct mcpm_platform_ops { */ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); +/* Synchronisation structures for coordinating safe cluster setup/teardown: */ + +/* + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match. + */ +struct mcpm_sync_struct { + /* individual CPU states */ + struct { + s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); + } cpus[MAX_CPUS_PER_CLUSTER]; + + /* cluster state */ + s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + + /* inbound-side state */ + s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { + struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + +extern unsigned long sync_phys; /* physical address of *mcpm_sync */ + +void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); +void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); +void __mcpm_outbound_leave_critical(unsigned int cluster, int state); +bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); +int __mcpm_cluster_state(unsigned int cluster); +
+int __init mcpm_sync_init( + void (*power_up_setup)(unsigned int affinity_level)); + +#else + +/* + * asm-offsets.h causes trouble when included in .c files, and cacheflush.h + * cannot be included in asm files. Let's work around the conflict like this.
+ */ +#include <asm/asm-offsets.h> +#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE + #endif /* ! __ASSEMBLY__ */ + +/* Definitions for mcpm_sync_struct */ +#define CPU_DOWN 0x11 +#define CPU_COMING_UP 0x12 +#define CPU_UP 0x13 +#define CPU_GOING_DOWN 0x14 + +#define CLUSTER_DOWN 0x21 +#define CLUSTER_UP 0x22 +#define CLUSTER_GOING_DOWN 0x23 + +#define INBOUND_NOT_COMING_UP 0x31 +#define INBOUND_COMING_UP 0x32 + +/* + * Offsets for the mcpm_sync_struct members, for use in asm. + * We don't want to make them global to the kernel via asm-offsets.c. + */ +#define MCPM_SYNC_CLUSTER_CPUS 0 +#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE +#define MCPM_SYNC_CLUSTER_CLUSTER \ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER) +#define MCPM_SYNC_CLUSTER_INBOUND \ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE) +#define MCPM_SYNC_CLUSTER_SIZE \ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE) + #endif -- cgit v1.1 From a7eb7c6f9a657a01a8359edae31bbeacd18b072c Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 9 Apr 2013 01:29:17 -0400 Subject: ARM: mcpm: provide an interface to set the SMP ops at run time This is cleaner than exporting the mcpm_smp_ops structure. Signed-off-by: Nicolas Pitre Acked-by: Jon Medhurst --- arch/arm/include/asm/mcpm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 3046e90..0f7b762 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -167,6 +167,8 @@ int __mcpm_cluster_state(unsigned int cluster); int __init mcpm_sync_init( void (*power_up_setup)(unsigned int affinity_level)); +void __init mcpm_smp_set_ops(void); + #else /* -- cgit v1.1 From 6aaa189f8712471a250bfdf8fc8d08277258b8ab Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 23 Apr 2013 11:21:44 +0100 Subject: ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE ARM processors with LPAE enabled use 3 levels of page tables, with an entry in the top level (pgd) covering 1GB of virtual space. Because of the branch relocation limitations on ARM, the loadable modules are mapped 16MB below PAGE_OFFSET, making the corresponding 1GB pgd shared between kernel modules and user space. If free_pgtables() is called with the default ceiling 0, free_pgd_range() (and subsequently called functions) also frees the page table shared between user space and kernel modules (which is normally handled by the ARM-specific pgd_free() function). This patch defines the ARM USER_PGTABLES_CEILING to TASK_SIZE when CONFIG_ARM_LPAE is enabled. Note that the pgd_free() function already checks the presence of the shared pmd page allocated by pgd_alloc() and frees it, though with ceiling 0 this wasn't necessary. Signed-off-by: Catalin Marinas Cc: Hugh Dickins Cc: Andrew Morton Cc: # 3.3+ Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4..9bcd262 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel).
+ */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP -- cgit v1.1 From 104ad3b32d7a71941c8ab2dee78eea38e8a23309 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 29 Apr 2013 15:07:45 -0700 Subject: arm: set the page table freeing ceiling to TASK_SIZE ARM processors with LPAE enabled use 3 levels of page tables, with an entry in the top level (pgd) covering 1GB of virtual space. Because of the branch relocation limitations on ARM, the loadable modules are mapped 16MB below PAGE_OFFSET, making the corresponding 1GB pgd shared between kernel modules and user space. If free_pgtables() is called with the default ceiling 0, free_pgd_range() (and subsequently called functions) also frees the page table shared between user space and kernel modules (which is normally handled by the ARM-specific pgd_free() function). This patch defines the ARM USER_PGTABLES_CEILING to TASK_SIZE when CONFIG_ARM_LPAE is enabled. Note that the pgd_free() function already checks the presence of the shared pmd page allocated by pgd_alloc() and frees it, though with ceiling 0 this wasn't necessary. Signed-off-by: Catalin Marinas Cc: Russell King Cc: Hugh Dickins Cc: [3.3+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/pgtable.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4..9bcd262 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). + */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP -- cgit v1.1
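Editorial note: as a worked example tying the MCPM additions above together, a hypothetical platform backend could register itself as sketched below. The myplat_* names are illustrative only; a real backend must also provide suspend/powered_up handlers where needed, manage cluster-level power for the first/last CPU, and perform the coherency exit/setup dance via the __mcpm_* helpers and a power_up_setup routine passed to mcpm_sync_init().

#include <linux/init.h>
#include <asm/mcpm.h>

static int myplat_power_up(unsigned int cpu, unsigned int cluster)
{
	/* release the CPU from reset; it will enter via mcpm_entry_point,
	 * branching to whatever mcpm_set_entry_vector() installed */
	return 0;
}

static void myplat_power_down(void)
{
	/* clean caches, exit coherency, then WFI; re-entry is expected
	 * via mcpm_entry_point, so this function does not return */
}

static const struct mcpm_platform_ops myplat_power_ops = {
	.power_up	= myplat_power_up,
	.power_down	= myplat_power_down,
};

static int __init myplat_mcpm_init(void)
{
	int ret = mcpm_platform_register(&myplat_power_ops);

	if (!ret)
		mcpm_smp_set_ops();	/* route SMP boot through MCPM */
	return ret;	/* an error means another backend registered first */
}
early_initcall(myplat_mcpm_init);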