Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--  include/linux/percpu.h  205
1 file changed, 204 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b83..27c3c6f 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
pscr_ret__; \
})
+#define __pcpu_size_call_return2(stem, variable, ...) \
+({ \
+ typeof(variable) pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pscr2_ret__; \
+})
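For orientation (not part of the patch): the new dispatcher pastes the size of the variable onto the stem and forwards any extra operands via __VA_ARGS__, mirroring the existing __pcpu_size_call_return() but for operations that take arguments and return a value. For a hypothetical DEFINE_PER_CPU(int, x), a call such as this_cpu_add_return(x, 5) (defined below) would reduce through the case-4 arm to roughly:

	typeof(x) pscr2_ret__;
	__verify_pcpu_ptr(&(x));
	pscr2_ret__ = this_cpu_add_return_4(x, 5);	/* sizeof(x) == 4 */
	pscr2_ret__;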
+
#define __pcpu_size_call(stem, variable, ...) \
do { \
__verify_pcpu_ptr(&(variable)); \
@@ -402,6 +417,89 @@ do { \
# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif
+#define _this_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(pcp) ret__; \
+ preempt_disable(); \
+ __this_cpu_add(pcp, val); \
+ ret__ = __this_cpu_read(pcp); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
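A minimal usage sketch, assuming a hypothetical per-CPU counter (names invented for illustration; not part of this patch). The point of this_cpu_inc_return() is that the increment and the read of the result happen on the same CPU as one unit, with no explicit preempt_disable()/preempt_enable() pair in the caller:

	/* Hypothetical example; needs <linux/percpu.h>. "nr_events" is an
	 * illustrative symbol, not a real kernel variable. */
	static DEFINE_PER_CPU(unsigned int, nr_events);

	static void count_event(void)
	{
		/* add + read are atomic vs. preemption as one unit */
		if (this_cpu_inc_return(nr_events) == 1000)
			pr_info("1000 events on this CPU\n");
	}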
+
+#define _this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval) \
+ __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
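A sketch of the xchg operation, again with invented names: take the current per-CPU value and reset it, atomically with respect to preemption, e.g. when draining per-CPU state:

	/* Hypothetical example: "pending" is an illustrative symbol. */
	static DEFINE_PER_CPU(unsigned long, pending);

	static unsigned long take_pending(void)
	{
		/* read + write happen on one CPU as a unit */
		return this_cpu_xchg(pending, 0UL);
	}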
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
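The cmpxchg form enables the usual optimistic-update loop. A hedged sketch (sat_count and MAX_COUNT are invented for illustration) that saturates a per-CPU counter; if the task migrates between the read and the cmpxchg, the compare simply fails on the new CPU and the loop retries:

	/* Hypothetical example: saturating increment of a per-CPU counter. */
	static DEFINE_PER_CPU(int, sat_count);

	static void sat_inc(void)
	{
		int old;

		do {
			old = this_cpu_read(sat_count);
			if (old == MAX_COUNT)	/* MAX_COUNT: illustrative limit */
				return;
		} while (this_cpu_cmpxchg(sat_count, old, old + 1) != old);
	}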
+
/*
* Generic percpu operations that do not require preemption handling.
* Either we do not care about races or the caller has the
@@ -529,11 +627,87 @@ do { \
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif
+#define __this_cpu_generic_add_return(pcp, val) \
+({ \
+ __this_cpu_add(pcp, val); \
+ __this_cpu_read(pcp); \
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val) __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
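Unlike the this_cpu_* versions above, the __this_cpu_* forms leave preemption handling entirely to the caller. A minimal sketch, assuming the caller brackets the operation itself (names invented for illustration):

	/* Hypothetical example: caller provides the preemption protection. */
	static DEFINE_PER_CPU(int, refcnt);

	static int grab_ref(void)
	{
		int ret;

		preempt_disable();	/* pin to this CPU */
		ret = __this_cpu_inc_return(refcnt);
		preempt_enable();
		return ret;
	}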
+
+#define __this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval) \
+ __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
/*
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from other
* processors (which is what one gets when using regular atomic operations).
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
* preemption only.
*/
#define irqsafe_cpu_generic_to_op(pcp, val, op) \
@@ -620,4 +794,33 @@ do { \
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
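Finally, a sketch of where the irqsafe variant is needed (invented names): when the same per-CPU variable is also modified from a local interrupt handler, the compare and the conditional write must not be separated by an IRQ on this CPU, which is why the generic fallback brackets them with local_irq_save()/local_irq_restore():

	/* Hypothetical example: "cpu_state" is also written from a local
	 * interrupt handler, so plain this_cpu_cmpxchg() could race with it. */
	static DEFINE_PER_CPU(unsigned long, cpu_state);

	static int claim_cpu_state(void)
	{
		return irqsafe_cpu_cmpxchg(cpu_state, 0UL, 1UL) == 0UL;
	}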
+
#endif /* __LINUX_PERCPU_H */