-rw-r--r--	include/linux/cgroup.h          |  2
-rw-r--r--	include/linux/percpu-refcount.h | 34
-rw-r--r--	include/linux/percpu.h          |  2
3 files changed, 35 insertions, 3 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index d60904b..bddebc5 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -101,7 +101,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css)
 {
 	if (css->flags & CSS_ROOT)
 		return true;
-	return percpu_ref_tryget(&css->refcnt);
+	return percpu_ref_tryget_live(&css->refcnt);
 }
 
 /**
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 0afb48f..5d8920e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -121,6 +121,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock_sched();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		this_cpu_inc(*pcpu_count);
+		ret = true;
+	} else {
+		ret = atomic_inc_not_zero(&ref->count);
+	}
+
+	rcu_read_unlock_sched();
+
+	return ret;
+}
+
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
@@ -128,8 +158,10 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
  * used. After the confirm_kill callback is invoked, it's guaranteed that
  * no new reference will be given out by percpu_ref_tryget().
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 	int ret = false;
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 495c654..8419053 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -29,7 +29,7 @@
  */
 #define get_cpu_var(var) (*({				\
 	preempt_disable();				\
-	&__get_cpu_var(var); }))
+	this_cpu_ptr(&var); }))
 
 /*
  * The weird & is necessary because sparse considers (void)(var) to be
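
For context, a minimal usage sketch (not part of the patch) of how a caller might choose between the two variants. struct foo, foo_lookup_get() and foo_get_unless_zero() are hypothetical names; only the percpu_ref calls shown in the diff above are assumed.

#include <linux/percpu-refcount.h>

/* Hypothetical object embedding a percpu_ref. */
struct foo {
	struct percpu_ref ref;
	/* ... */
};

/*
 * Lookup path: hand out new references only while the object is still
 * live, i.e. before percpu_ref_kill() has been called on it.
 */
static struct foo *foo_lookup_get(struct foo *foo)
{
	return percpu_ref_tryget_live(&foo->ref) ? foo : NULL;
}

/*
 * Internal path: acquire a reference even after kill, as long as the
 * count has not yet dropped to zero.
 */
static bool foo_get_unless_zero(struct foo *foo)
{
	return percpu_ref_tryget(&foo->ref);
}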
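
Similarly, a small sketch (again not part of the patch, using a hypothetical per-cpu counter) of the get_cpu_var() calling convention, which the macro rewrite above preserves while routing the address computation through this_cpu_ptr():

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hit_count);	/* hypothetical counter */

static void count_hit(void)
{
	get_cpu_var(hit_count)++;	/* disables preemption, bumps local CPU's copy */
	put_cpu_var(hit_count);		/* re-enables preemption */
}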