From a3a9f3b47d12b5f6dfc9c7ed9d7b193d77812195 Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Fri, 21 Oct 2011 23:56:27 +0000
Subject: powerpc/irq: Remove IRQF_DISABLED

Since commit [e58aa3d2: genirq: Run irq handlers with interrupts
disabled], we run all interrupt handlers with interrupts disabled and
we even check and yell when an interrupt handler returns with
interrupts enabled (see commit [b738a50a: genirq: Warn when handler
enables interrupts]).

So now this flag is a NOOP and can be removed.

Signed-off-by: Yong Zhang
Acked-by: Arnd Bergmann
Acked-by: Geoff Levand
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/floppy.h | 4 ++--
 arch/powerpc/include/asm/xics.h   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c..936a904 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -108,10 +108,10 @@ static int fd_request_irq(void)
 {
 	if (can_use_virtual_dma)
 		return request_irq(FLOPPY_IRQ, floppy_hardint,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 	else
 		return request_irq(FLOPPY_IRQ, floppy_interrupt,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 }
 
 static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index bd6c401..c48de98 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -15,8 +15,8 @@
 #define DEFAULT_PRIORITY	5
 
 /*
- * Mark IPIs as higher priority so we can take them inside interrupts that
- * arent marked IRQF_DISABLED
+ * Mark IPIs as higher priority so we can take them inside interrupts
+ * FIXME: still true now?
  */
 #define IPI_PRIORITY		4
 
--
cgit v1.1

From 9fce85f7ff94f1a877c15ad3d6ffbaed4b5cd1a6 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Wed, 12 Oct 2011 08:42:13 +0000
Subject: powerpc/ps3: Fix lv1_gpu_attribute hcall

The lv1_gpu_attribute hcall takes three, not five, input arguments.
Adjust the lv1 hcall table and all calls.

Signed-off-by: Geoff Levand
CC: Takashi Iwai
Acked-by: Takashi Iwai
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/lv1call.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 9cd5fc8..f77c708 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free,                              1, 0, 218 )
 LV1_CALL(gpu_context_iomap,                             5, 0, 221 )
 LV1_CALL(gpu_context_attribute,                         6, 0, 225 )
 LV1_CALL(gpu_context_intr,                              1, 1, 227 )
-LV1_CALL(gpu_attribute,                                 5, 0, 228 )
+LV1_CALL(gpu_attribute,                                 3, 0, 228 )
 LV1_CALL(get_rtc,                                       0, 2, 232 )
 LV1_CALL(set_ppe_periodic_tracer_frequency,             1, 0, 240 )
 LV1_CALL(start_ppe_periodic_tracer,                     5, 0, 241 )
--
cgit v1.1
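For illustration (not part of the commit above): LV1_CALL(name, in, out, num)
declares an hypervisor-call stub lv1_name() taking `in' 64-bit input arguments
and `out' output pointers, so the table entry must match what the hypervisor
actually consumes. A minimal sketch of a call site after this fix; the wrapper
and argument names here are hypothetical, only lv1_gpu_attribute() itself
comes from the patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/lv1call.h>

/* Hypothetical wrapper: three inputs, no outputs, matching
 * LV1_CALL(gpu_attribute, 3, 0, 228) above. */
static int ps3_gpu_set_attribute(u64 attribute, u64 p0, u64 p1)
{
	s64 status = lv1_gpu_attribute(attribute, p0, p1);

	if (status) {
		pr_debug("%s: lv1_gpu_attribute failed: %lld\n",
			 __func__, status);
		return -ENXIO;
	}
	return 0;
}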
From d715e433b7ad19c02fc4becf0d5e9a59f97925de Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Mon, 14 Nov 2011 12:54:47 +0000
Subject: powerpc: Copy down exception vectors after feature fixups

kdump fails because we try to execute an HV only instruction. Feature
fixups are being applied after we copy the exception vectors down to 0,
so they miss out on any updates.

We have always had this issue but it only became critical in v3.0 when
we added CFAR support (breaks POWER5) and v3.1 when we added POWERNV
(breaks everyone).

Signed-off-by: Anton Blanchard
Cc: [v3.0+]
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/sections.h | 2 +-
 arch/powerpc/include/asm/synch.h    | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce72..a0f358d 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44..87878c6 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
 			     void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
--
cgit v1.1

From 187b9f2aa769198daa7cf8054abb65a08b8d8b47 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Thu, 6 Oct 2011 02:53:40 +0000
Subject: powerpc/book3e-64: Fix debug support for userspace

With the introduction of CONFIG_PPC_ADV_DEBUG_REGS, user space debug is
broken on Book-E 64-bit parts that support delayed debug events. When
switch_booke_debug_regs() sets DBCR0 we'll start getting debug events,
as MSR_DE is also set, and we aren't able to handle debug events from
kernel space.

We can remove the hack that always enables MSR_DE and loads up DBCR0,
and just utilize switch_booke_debug_regs() to get user space debug
working again.

We still need to handle critical/debug exception stacks & proper
save/restore of state for those exception levels to support debug
events from kernel space like we have on 32-bit.

Signed-off-by: Kumar Gala
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/reg_booke.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9..03c48e8 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
 
 #define MSR_		MSR_ME | MSR_CE
 #define MSR_KERNEL	MSR_ | MSR_64BIT
-#define MSR_USER32	MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32	MSR_ | MSR_PR | MSR_EE
 #define MSR_USER64	MSR_USER32 | MSR_64BIT
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
--
cgit v1.1
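For illustration (not part of the commit above): with MSR_DE dropped from
MSR_USER32, user threads no longer run with debug events enabled by default;
the bit is now only set via the debug-register switching path. A hypothetical
compile-time self-check along these lines would confirm that. Note the extra
parentheses: the MSR_USER32 definition above is an unparenthesized bit-or, so
a bare `MSR_USER32 & MSR_DE' would not expand as intended.

#include <linux/bug.h>
#include <asm/reg.h>

static inline void check_default_user_msr(void)
{
	/* Sketch only: the default user MSR must not carry MSR_DE anymore. */
	BUILD_BUG_ON(((MSR_USER32) & MSR_DE) != 0);
	BUILD_BUG_ON(((MSR_USER64) & MSR_DE) != 0);
}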
From b97021f85517552ea8a0d2c1680c1ee4beab6d14 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 15 Nov 2011 17:11:27 +0000
Subject: powerpc: Fix atomic_xxx_return barrier semantics

The Documentation/memory-barriers.txt document requires that atomic
operations that return a value act as a memory barrier both before and
after the actual atomic operation.

Our current implementation doesn't guarantee this. More specifically,
while a load following the isync cannot be issued before stwcx. has
completed, that completion doesn't architecturally mean that the result
of stwcx. is visible to other processors (or any previous stores for
that matter) (typically, the other processors' L1 caches can still hold
the old value).

This has caused an actual crash in RCU torture testing on Power 7.

This fixes it by changing those atomic ops to use new macros instead of
RELEASE/ACQUIRE barriers, called ATOMIC_ENTRY and ATOMIC_EXIT barriers,
which are then defined respectively to lwsync and sync.

I haven't had a chance to measure the performance impact (or rather,
what I measured with kernel compiles is in the noise; I have yet to
find a more precise benchmark).

Signed-off-by: Benjamin Herrenschmidt
Acked-by: Paul E. McKenney
---
 arch/powerpc/include/asm/atomic.h | 48 +++++++++++++++++++--------------------
 arch/powerpc/include/asm/bitops.h | 12 +++++-----
 arch/powerpc/include/asm/futex.h  |  7 +++---
 arch/powerpc/include/asm/synch.h  |  8 +++++--
 4 files changed, 40 insertions(+), 35 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26..02e41b5 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	int t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	cmpwi	%0,1\n\
 	addi	%0,%0,-1\n\
 	blt-	2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&b" (t)
 	: "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	long t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
 "	stdcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afc..efdc926 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn(			\
 	return (old & mask);					\
 }
 
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
 	      PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
 static __inline__ int test_and_set_bit(unsigned long nr,
 				       volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3..2a9cf84 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
-	PPC_RELEASE_BARRIER \
+	PPC_ATOMIC_ENTRY_BARRIER \
 "1:	lwarx	%0,0,%2\n" \
 	insn \
 	PPC405_ERR77(0, %2) \
 "2:	stwcx.	%1,0,%2\n" \
 	"bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
 	"li	%1,0\n" \
 "3:	.section .fixup,\"ax\"\n" \
 "4:	li	%1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
         __asm__ __volatile__ (
-        PPC_RELEASE_BARRIER
+        PPC_ATOMIC_ENTRY_BARRIER
 "1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
         cmpw    0,%1,%4\n\
         bne-    3f\n"
         PPC405_ERR77(0,%3)
 "2:     stwcx.  %5,0,%3\n\
         bne-    1b\n"
-        PPC_ACQUIRE_BARRIER
+        PPC_ATOMIC_EXIT_BARRIER
 "3:     .section .fixup,\"ax\"\n\
 4:      li      %0,%6\n\
         b       3b\n\
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 87878c6..e682a71 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -42,11 +42,15 @@ static inline void isync(void)
 	START_LWSYNC_SECTION(97);			\
 	isync;						\
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
-#define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
-#define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
+#define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
 #define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
 #endif
 
 #endif /* __KERNEL__ */
--
cgit v1.1
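To make the guarantee concrete, here is an illustrative store-buffering
litmus sketch (not from the patch; meaningful only when the two functions run
concurrently on separate CPUs). With the old lwsync/isync bracketing, both
observers could miss each other's store and finish with r2 == 0 && r3 == 0;
with the new lwsync-entry/sync-exit bracketing, atomic_add_return() is a full
barrier on both sides and that outcome is forbidden.

#include <linux/atomic.h>

static int x, y, r2, r3;
static atomic_t v = ATOMIC_INIT(0);

static void cpu0(void)			/* runs on CPU 0 */
{
	x = 1;
	atomic_add_return(1, &v);	/* full barrier before and after */
	r2 = y;
}

static void cpu1(void)			/* runs on CPU 1 */
{
	y = 1;
	atomic_add_return(1, &v);	/* full barrier before and after */
	r3 = x;
}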
From bb75c627fb0dfb8c0ab75d3033709ff928896e16 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Thu, 17 Nov 2011 15:26:35 +0100
Subject: Revert "KVM: PPC: Add support for explicit HIOR setting"

This reverts commit a15bd354f083f20f257db450488db52ac27df439.

It exceeded the padding on the SREGS struct, rendering the ABI
backwards-incompatible.

Conflicts:

	arch/powerpc/kvm/powerpc.c
	include/linux/kvm.h

Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm.h        | 8 --------
 arch/powerpc/include/asm/kvm_book3s.h | 2 --
 2 files changed, 10 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69e..0ad432b 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
 #define KVM_SREGS_E_UPDATE_DBSR		(1 << 3)
 
 /*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR		(1 << 0)
-
-/*
  * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
  * previous KVM_GET_REGS.
  *
@@ -179,8 +173,6 @@ struct kvm_sregs {
 			__u64 ibat[8];
 			__u64 dbat[8];
 		} ppc32;
-		__u64 flags; /* KVM_SREGS_S_ */
-		__u64 hior;
 	} s;
 	struct {
 		union {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffd..d4df013 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
 #endif
 	int context_id[SID_CONTEXTS];
 
-	bool hior_sregs;		/* HIOR is set by SREGS, not PVR */
-
 	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
--
cgit v1.1
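For context, a hypothetical (non-KVM) illustration of the ABI rule the
reverted patch violated: a fixed-size ioctl payload may only grow into
padding that was reserved up front, otherwise its size, and with it the ABI,
changes and old userspace breaks. All names below are made up for the
example.

#include <stdint.h>

/* v1 layout: reserve pad space for future growth. */
struct example_sregs {
	uint64_t pvr;
	uint64_t pad[15];
};

/* v2 layout: a new field consumes a pad slot instead of appending. */
struct example_sregs_v2 {
	uint64_t pvr;
	uint64_t hior;		/* takes one slot from the old pad */
	uint64_t pad[14];
};

/* The ioctl ABI survives only if the size is byte-for-byte identical. */
_Static_assert(sizeof(struct example_sregs) == sizeof(struct example_sregs_v2),
	       "ABI-visible struct size must not change");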