path: root/include/asm-frv
author     Mathieu Desnoyers <compudj@krystal.dyndns.org>  2008-02-08 15:00:45 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 15:33:32 -0800
commit     6784fd5931a58559673f500a333030ceaadb69bb (patch)
tree       911673988500ab7c923b210220a462286741c485 /include/asm-frv
parent     b55fcb22d445a7460cbbc138ceae096d5617715a (diff)
Fix FRV cmpxchg_local
Fix the FRV cmpxchg_local by breaking the following header dependency loop:

    linux/kernel.h -> linux/bitops.h -> asm-frv/bitops.h -> asm-frv/atomic.h ->
      asm-frv/system.h -> asm-generic/cmpxchg_local.h -> typecheck() defined in linux/kernel.h

and

    linux/kernel.h -> linux/bitops.h -> asm-frv/bitops.h -> asm-frv/atomic.h ->
      asm-generic/cmpxchg_local.h -> typecheck() defined in linux/kernel.h

In order to fix this:

- Move the atomic_test_and_*_mask inlines from asm-frv/atomic.h (why are they there at all anyway? They do not touch atomic_t variables!) to asm-frv/bitops.h.

Also fix a build issue with cmpxchg: unlike other architectures, FRV's cmpxchg does not cast its pointer argument to (unsigned long *), so that cast has to be done in the cmpxchg_local code.

FRV builds fine with this patch.

Thanks to Adrian Bunk <bunk@kernel.org> for spotting this bug.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Adrian Bunk <bunk@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
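To make the cmpxchg_local part of the fix easier to follow, here is a minimal sketch of the size-dispatch pattern that __cmpxchg_local() uses, written as plain, non-atomic C so it compiles on any host. The fake_* helpers and sketch_cmpxchg_local() are invented for illustration and merely stand in for FRV's real cmpxchg() and the asm-generic fallback; this is not the kernel's code.

/*
 * Illustrative sketch only -- not the real asm-frv / asm-generic code.
 * It mimics the shape of __cmpxchg_local(): dispatch on operand size
 * and hand the 4-byte case to the architecture's cmpxchg(), which on
 * FRV expects a typed pointer -- hence the explicit cast, which is
 * what the hunk in asm-frv/system.h below adds.  The "cmpxchg" here
 * is an ordinary, non-atomic stand-in.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Stand-in for a 32-bit arch cmpxchg primitive (FRV's unsigned long). */
static uint32_t fake_cmpxchg32(volatile uint32_t *ptr, uint32_t old,
			       uint32_t new)
{
	uint32_t prev = *ptr;

	if (prev == old)
		*ptr = new;
	return prev;
}

/* Stand-in for the generic fallback (__cmpxchg_local_generic() in the
 * kernel); this sketch only bothers with the 8-byte case. */
static uint64_t fake_cmpxchg_generic(volatile uint64_t *ptr, uint64_t old,
				     uint64_t new)
{
	uint64_t prev = *ptr;

	if (prev == old)
		*ptr = new;
	return prev;
}

/* Same dispatch shape as __cmpxchg_local() in asm-frv/system.h: the
 * void * argument must be cast to the pointer type the primitive wants. */
static unsigned long sketch_cmpxchg_local(volatile void *ptr,
					  unsigned long old,
					  unsigned long new, size_t size)
{
	switch (size) {
	case 4:
		return fake_cmpxchg32((volatile uint32_t *)ptr, old, new);
	default:
		return fake_cmpxchg_generic((volatile uint64_t *)ptr, old, new);
	}
}

int main(void)
{
	unsigned long x = 5;

	/* Succeeds: x was 5, so it becomes 7 and the old value 5 is returned. */
	unsigned long prev = sketch_cmpxchg_local(&x, 5, 7, sizeof(x));

	printf("prev=%lu x=%lu\n", prev, x);
	return 0;
}

The point of the case 4 branch is the explicit pointer cast: FRV's cmpxchg() does not perform that cast itself, so the caller has to, which is exactly what the one-line change to __cmpxchg_local() in the diff below does.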
Diffstat (limited to 'include/asm-frv')
-rw-r--r--  include/asm-frv/atomic.h  81
-rw-r--r--  include/asm-frv/bitops.h  83
-rw-r--r--  include/asm-frv/system.h   3
3 files changed, 83 insertions, 84 deletions
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 6ec494a..46d696b 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -125,87 +125,6 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " and%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(~mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " or%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " xor%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
-
/*****************************************************************************/
/*
* exchange value with memory
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index 5f86b87..39456ba 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -16,8 +16,6 @@
#include <linux/compiler.h>
#include <asm/byteorder.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
#ifdef __KERNEL__
@@ -33,6 +31,87 @@
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline
+unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " and%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(~mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+static inline
+unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " or%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+static inline
+unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " xor%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+#else
+
+extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
+extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
+extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
+
+#endif
+
+#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
+#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
+
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
volatile unsigned long *ptr = addr;
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 59be544..b400cea 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/linkage.h>
+#include <linux/kernel.h>
struct thread_struct;
@@ -276,7 +277,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
{
switch (size) {
case 4:
- return cmpxchg(ptr, old, new);
+ return cmpxchg((unsigned long *)ptr, old, new);
default:
return __cmpxchg_local_generic(ptr, old, new, size);
}