Diffstat (limited to 'include/asm-sh/atomic.h')
-rw-r--r--  include/asm-sh/atomic.h  153
1 files changed, 2 insertions, 151 deletions
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3..e12570b 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h>
#include <asm/system.h>
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
+#include <asm/atomic-llsc.h>
#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v += i;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v -= i;
- local_irq_restore(flags);
+#include <asm/atomic-irq.h>
#endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp += i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp -= i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
-
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
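
The first hunk folds the open-coded SH-4A sequences into a single #include of <asm/atomic-llsc.h>. That header's contents are not part of this diff; as a minimal sketch, assuming it carries the same movli.l/movco.l retry loop as the removed functions, one op would look like:

	/* Sketch only -- mirrors the removed atomic_add(); the real
	 * asm/atomic-llsc.h may differ in layout and coverage. */
	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned long tmp;

		__asm__ __volatile__ (
	"1:	movli.l @%2, %0	! atomic_add	\n"	/* load-linked */
	"	add	%1, %0			\n"
	"	movco.l	%0, @%2			\n"	/* store-conditional */
	"	bf	1b			\n"	/* retry if the store failed */
		: "=&z" (tmp)			/* movli.l/movco.l require r0 ("z") */
		: "r" (i), "r" (&v->counter)
		: "t");				/* movco.l writes the T bit */
	}

Note that in the removed code only the value-returning variants (atomic_add_return, atomic_sub_return) issue a trailing synco barrier after the store-conditional succeeds; the void ops do not.
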
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_clear_mask \n"
-" and %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (~mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v &= ~mask;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_set_mask \n"
-" or %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v |= mask;
- local_irq_restore(flags);
-#endif
-}
-
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
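
The non-SH4A fallback moves to <asm/atomic-irq.h> in the same way. Again the header itself is not shown here; a sketch under the assumption that it keeps the removed local_irq_save()/local_irq_restore() pattern, which suffices on these uniprocessor parts because masking interrupts makes the read-modify-write sequence atomic:

	/* Sketch only -- mirrors the removed fallback bodies; the real
	 * asm/atomic-irq.h may differ. (The removed code wrote through
	 * a cast, *(long *)v += i, rather than v->counter.) */
	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		local_irq_save(flags);		/* mask interrupts around the RMW */
		v->counter += i;
		local_irq_restore(flags);
	}
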