author | Christoph Lameter <clameter@engr.sgi.com> | 2006-01-06 00:11:20 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-06 08:33:29 -0800
commit | d3cb487149bd706aa6aeb02042332a450978dc1c (patch)
tree | 69051e0f9853314cf275e4e800faad950e3053c3 /include/asm-generic
parent | 070f80326a215d8e6c4fd6f175e28eb446c492bc (diff)
[PATCH] atomic_long_t & include/asm-generic/atomic.h V2
Several counters already need 64 bit atomic variables on 64 bit
platforms (see mm_counter_t in sched.h). We have to do ugly ifdefs to fall
back to 32 bit atomics on 32 bit platforms.
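As an illustration only (this is not the actual sched.h code), the kind of
per-counter fallback this refers to looks roughly like:

/* Illustrative sketch only, not the actual sched.h definition */
#if BITS_PER_LONG == 64
typedef atomic64_t mm_counter_t;	/* native 64 bit atomic counter */
#else
typedef atomic_t mm_counter_t;		/* fall back to a 32 bit atomic */
#endif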
The VM statistics patch that I am working on will also make more extensive
use of atomic64.
This patch introduces a new type, atomic_long_t, by providing definitions in
asm-generic/atomic.h that work like the C "long" type: it is 32 bits
on 32 bit platforms and 64 bits on 64 bit platforms.
It also cleans up the determination of mm_counter_t in sched.h.
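For illustration, a minimal usage sketch of the new type (the counter and
helper names below are hypothetical, not part of the patch):

/* Hypothetical counter: atomic_long_t resolves to atomic64_t on 64 bit
 * and atomic_t on 32 bit platforms, so no ifdefs are needed here. */
static atomic_long_t nr_items = ATOMIC_LONG_INIT(0);

static inline void item_added(void)
{
	atomic_long_inc(&nr_items);
}

static inline long items_seen(void)
{
	return atomic_long_read(&nr_items);
}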
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/atomic.h | 116
1 file changed, 116 insertions, 0 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 0000000..e0a28b9
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,116 @@
+#ifndef _ASM_GENERIC_ATOMIC_H
+#define _ASM_GENERIC_ATOMIC_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter <clameter@sgi.com>
+ *
+ * Allows arch independent atomic definitions to be provided without the
+ * need to edit all arch specific atomic.h files.
+ */
+
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_sub(i, v);
+}
+
+#else
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	atomic_sub(i, v);
+}
+
+#endif
+#endif