path: root/lib/percpu-refcount.c
author    Tejun Heo <tj@kernel.org>    2014-06-28 08:10:14 -0400
committer Tejun Heo <tj@kernel.org>    2014-06-28 08:10:14 -0400
commit    2d7227828e1475c7b272e55bd70c4cec8eea219a (patch)
tree      068171c424acc2390b1e6ecf514182c82cc5811d /lib/percpu-refcount.c
parent    9a1049da9bd2cd83fe11d46433e603c193aa9c71 (diff)
percpu-refcount: implement percpu_ref_reinit() and percpu_ref_is_zero()
Now that explicit invocation of percpu_ref_exit() is necessary to free the percpu counter, we can implement percpu_ref_reinit(), which reinitializes a released percpu_ref. This can be used to implement a scalable gating switch which can be drained and then re-opened without worrying about memory allocation failures.

percpu_ref_is_zero() is added to be used in a sanity check in percpu_ref_exit(). As this function will be useful for other purposes too, make it a public interface.

v2: Use smp_read_barrier_depends() instead of smp_load_acquire(). We only need a data dependency barrier, and smp_load_acquire() is stronger and heavier on some archs. Spotted by Lai Jiangshan.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
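For illustration, here is a minimal sketch of the gating-switch pattern the message describes. It is not part of the patch; the names my_gate, my_gate_release, my_gate_drained and the my_gate_* helpers are hypothetical, and it assumes the two-argument percpu_ref_init() of this tree.

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>

    static struct percpu_ref my_gate;               /* hypothetical gate */
    static DECLARE_COMPLETION(my_gate_drained);

    static void my_gate_release(struct percpu_ref *ref)
    {
            complete(&my_gate_drained);             /* last reference dropped */
    }

    static int my_gate_setup(void)
    {
            /* the only allocation happens here, at init time */
            return percpu_ref_init(&my_gate, my_gate_release);
    }

    static bool my_gate_enter(void)                 /* per-cpu fast path */
    {
            return percpu_ref_tryget_live(&my_gate);
    }

    static void my_gate_exit(void)
    {
            percpu_ref_put(&my_gate);
    }

    static void my_gate_close(void)                 /* stop new users, drain */
    {
            percpu_ref_kill(&my_gate);
            wait_for_completion(&my_gate_drained);
    }

    static void my_gate_open(void)                  /* re-open the gate */
    {
            reinit_completion(&my_gate_drained);
            percpu_ref_reinit(&my_gate);
    }

Because re-opening only resets the already-allocated per-cpu counters, my_gate_open() has no failure path, which is the point the message makes about memory allocation failures.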
Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r--    lib/percpu-refcount.c    35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index ac42991..fe5a334 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -61,6 +61,41 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init(). @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_reinit(struct percpu_ref *ref)
+{
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ int cpu;
+
+ BUG_ON(!pcpu_count);
+ WARN_ON(!percpu_ref_is_zero(ref));
+
+ atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+ /*
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following PCPU_REF_DEAD clearing.
+ */
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(pcpu_count, cpu) = 0;
+
+ smp_store_release(&ref->pcpu_count_ptr,
+ ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
*
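Note that the smp_store_release() in the hunk above pairs with a reader-side barrier added by the same commit in include/linux/percpu-refcount.h, which is not shown here because the diff is limited to lib/percpu-refcount.c. A rough, from-memory sketch of that reader-side check (treat it as illustrative rather than a verbatim quote of the header change):

    static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
                                        unsigned __percpu **pcpu_countp)
    {
            unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);

            /* paired with smp_store_release() in percpu_ref_reinit() */
            smp_read_barrier_depends();

            if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
                    return false;

            *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
            return true;
    }

The data dependency barrier is enough here because a reader that observes the cleared PCPU_REF_DEAD bit only then dereferences the per-cpu pointer, so it is guaranteed to see the zeroed counters written before the release store.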