author    Nick Piggin <npiggin@suse.de>          2006-03-23 03:01:02 -0800
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-03-23 07:38:17 -0800
commit    0b2fcfdb8b4e7e379192f24ea2203163ddf5df1d (patch)
tree      1f3995e41ab12ff76e737389e0b59a40c0c73668 /include
parent    713729e8b993cb880225e2ced50a3f5ac05c2b3f (diff)
[PATCH] atomic: add_unless cmpxchg optimise
Without branch hints, the very unlikely chance of the loop repeating due
to cmpxchg failure is unrolled with gcc-4 that I have tested.

Improve this for architectures with a native cas/cmpxchg. llsc archs
should try to implement this natively.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
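For illustration, below is a minimal userspace sketch of the loop shape this
patch adopts. It is an assumption-laden stand-in, not kernel code: GCC's
__sync_val_compare_and_swap() and __builtin_expect() substitute for the
kernel's atomic_cmpxchg() and likely()/unlikely() macros, and the
add_unless() function name is hypothetical.

/*
 * Sketch of the hinted cmpxchg loop.  Userspace stand-ins for kernel
 * primitives: __sync_val_compare_and_swap ~ atomic_cmpxchg,
 * __builtin_expect ~ likely/unlikely.  Illustrative only.
 */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Add 'a' to *v unless *v == u; return nonzero if the add happened. */
static int add_unless(int *v, int a, int u)
{
	int c, old;

	c = *v;				/* snapshot the current value */
	for (;;) {
		if (unlikely(c == u))	/* hit the forbidden value: bail out */
			break;
		/* CAS returns the value that was actually found in *v */
		old = __sync_val_compare_and_swap(v, c, c + a);
		if (likely(old == c))	/* common case: succeeded first try */
			break;
		c = old;		/* lost a race; retry with fresh value */
	}
	return c != u;
}

int main(void)
{
	int v = 5;
	int done;

	done = add_unless(&v, 1, 0);	/* adds: v becomes 6, done == 1 */
	printf("done=%d v=%d\n", done, v);
	done = add_unless(&v, 1, 6);	/* v == u: no add, done == 0 */
	printf("done=%d v=%d\n", done, v);
	return 0;
}

The hints tell the compiler that the loop body is expected to execute exactly
once, so the cmpxchg-failure retry path can be laid out as an out-of-line
backward branch rather than unrolled. Note that the sparc64 hunk below goes
one step further and wraps the macro's result expression in likely(), hinting
that the common outcome for callers is a successful add.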
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/atomic.h       8
-rw-r--r--  include/asm-ia64/atomic.h       8
-rw-r--r--  include/asm-m68k/atomic.h       8
-rw-r--r--  include/asm-s390/atomic.h      18
-rw-r--r--  include/asm-sparc64/atomic.h   10
-rw-r--r--  include/asm-x86_64/atomic.h     8
6 files changed, 50 insertions, 10 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 78b0032..22d80ec 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -225,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa..569ec75 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 862e497..732d696 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index be6fefe2..de1d992 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
-
 	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
@@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 					  long long a, long long u)
 {
 	long long c, old;
-
 	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 25256bd..468eb48 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
-	c != (u);						\
+	}							\
+	likely(c != (u));					\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd55..cecbf7b 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)