Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--  include/linux/percpu-refcount.h  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ee83251..5df6784 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -55,7 +55,7 @@ struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
struct percpu_ref {
- atomic_t count;
+ atomic_long_t count;
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
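Widening count from atomic_t to atomic_long_t keeps the fallback counter at the machine word size: unchanged on 32-bit kernels, but 64 bits on 64-bit ones, where folding many per-CPU counts into a counter capped at 2^31 is the tight spot. The companion change in lib/percpu-refcount.c (outside this header-only view) presumably widens the bias constant to match; a sketch under that assumption:

	/* Assumed companion definition in lib/percpu-refcount.c, not part
	 * of this hunk: bias the atomic count by the top bit of a long so
	 * it cannot reach zero while per-CPU counts are still outstanding. */
	#define PCPU_COUNT_BIAS		(1LU << (BITS_PER_LONG - 1))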
@@ -97,7 +97,7 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
* branches as it can't assume that @ref->pcpu_count is not NULL.
*/
static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
- unsigned __percpu **pcpu_countp)
+ unsigned long __percpu **pcpu_countp)
{
unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
@@ -107,7 +107,7 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
return false;
- *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+ *pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
return true;
}
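Both __pcpu_ref_alive() hunks follow from the same encoding: pcpu_count_ptr packs a percpu pointer and a mode flag into one word, snapshotted with ACCESS_ONCE so flag and pointer are always read together. A sketch of that encoding, relying only on the header's statement that the flag lives in the pointer's low bit:

	/* Sketch only:
	 *   live:   pcpu_count_ptr == (unsigned long)percpu_ptr
	 *   killed: pcpu_count_ptr == (unsigned long)percpu_ptr | PCPU_REF_DEAD
	 * Word-aligned percpu allocations leave bit 0 free for the flag;
	 * the pointed-to counters are what widen to unsigned long here. */
	static inline bool ref_in_atomic_mode(unsigned long snapshot)
	{
		return snapshot & PCPU_REF_DEAD;	/* low bit doubles as the flag */
	}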
@@ -119,14 +119,14 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
*/
static inline void percpu_ref_get(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *pcpu_count;
rcu_read_lock_sched();
if (__pcpu_ref_alive(ref, &pcpu_count))
this_cpu_inc(*pcpu_count);
else
- atomic_inc(&ref->count);
+ atomic_long_inc(&ref->count);
rcu_read_unlock_sched();
}
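As a usage sketch (the foo type, its payload, and the release callback are illustrative, not part of this header): percpu_ref_get() is a plain this_cpu_inc() while the ref is live, and release fires only after a kill has switched puts over to the atomic_long path.

	struct foo {
		struct percpu_ref ref;
		/* ... payload ... */
	};

	static void foo_release(struct percpu_ref *ref)
	{
		struct foo *foo = container_of(ref, struct foo, ref);

		kfree(foo);
	}

	static void foo_io(struct foo *foo)
	{
		percpu_ref_get(&foo->ref);	/* cheap percpu fast path */
		/* ... do work ... */
		percpu_ref_put(&foo->ref);	/* may invoke foo_release() */
	}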
@@ -142,7 +142,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *pcpu_count;
int ret = false;
rcu_read_lock_sched();
@@ -151,7 +151,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
this_cpu_inc(*pcpu_count);
ret = true;
} else {
- ret = atomic_inc_not_zero(&ref->count);
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
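percpu_ref_tryget() keeps succeeding after a kill as long as the count has not hit zero; only atomic_long_inc_not_zero() failing on a genuinely-zero count refuses the reference. That is what makes it usable in RCU-protected lookups, sketched here with a hypothetical idr table:

	static struct foo *foo_lookup_and_get(struct idr *foo_idr, int id)
	{
		struct foo *foo;

		rcu_read_lock();
		foo = idr_find(foo_idr, id);		/* hypothetical table */
		if (foo && !percpu_ref_tryget(&foo->ref))
			foo = NULL;			/* count already zero */
		rcu_read_unlock();

		return foo;
	}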
@@ -175,7 +175,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *pcpu_count;
int ret = false;
rcu_read_lock_sched();
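The _live variant differs from percpu_ref_tryget() exactly at the DEAD check: once the ref has been killed it fails even while the count is still positive, so it is the right gate for refusing new users during shutdown. A sketch (names hypothetical):

	static int foo_start_request(struct foo *foo)
	{
		if (!percpu_ref_tryget_live(&foo->ref))
			return -ENODEV;	/* teardown underway: no new users */
		/* ... queue work; percpu_ref_put() on completion ... */
		return 0;
	}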
@@ -199,13 +199,13 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
*/
static inline void percpu_ref_put(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *pcpu_count;
rcu_read_lock_sched();
if (__pcpu_ref_alive(ref, &pcpu_count))
this_cpu_dec(*pcpu_count);
- else if (unlikely(atomic_dec_and_test(&ref->count)))
+ else if (unlikely(atomic_long_dec_and_test(&ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
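percpu_ref_put() only tests for zero on the atomic_long path, which is why teardown always starts with percpu_ref_kill(): the kill flips the ref into atomic mode (folding the per-CPU counts in after an RCU grace period) and the final put then runs release. A teardown sketch matching the get/put example above:

	static void foo_shutdown(struct foo *foo)
	{
		percpu_ref_kill(&foo->ref);	/* leave percpu mode; fold counts */
		percpu_ref_put(&foo->ref);	/* drop the initial reference */
	}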
@@ -219,11 +219,11 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
*/
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *pcpu_count;
if (__pcpu_ref_alive(ref, &pcpu_count))
return false;
- return !atomic_read(&ref->count);
+ return !atomic_long_read(&ref->count);
}
#endif
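One caveat from the last hunk: percpu_ref_is_zero() answers false for any live ref regardless of what the counters hold, so it is only meaningful after a kill has moved the ref onto the atomic_long counter. A final sketch tying the pieces together, assuming the percpu_ref_init() signature of this kernel era (ref, release callback, gfp flags) and reusing foo_release from the earlier sketch:

	static struct foo *foo_create(void)
	{
		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		if (percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL)) {
			kfree(foo);
			return NULL;
		}
		return foo;		/* holds the initial reference */
	}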