author    | Tejun Heo <tj@kernel.org> | 2014-09-20 01:27:25 -0400
committer | Tejun Heo <tj@kernel.org> | 2014-09-20 01:27:25 -0400
commit    | e625305b390790717cf2cccf61efb81299647028 (patch)
tree      | 87bde164e935bf06fca1b4adfa26d23f1cca4238 /lib/percpu-refcount.c
parent    | 4843c3320c3d23ab4ecf520f5eaf485aff8c7252 (diff)
download  | op-kernel-dev-e625305b390790717cf2cccf61efb81299647028.zip op-kernel-dev-e625305b390790717cf2cccf61efb81299647028.tar.gz
percpu-refcount: make percpu_ref based on longs instead of ints
percpu_ref is currently based on ints, so the number of refs it can
cover is (1 << 31). This makes it impossible to use a percpu_ref to
count memory objects or pages on 64-bit machines, as the counter may
overflow. Such users are forced to somehow aggregate references before
contributing them to the percpu_ref, which is often cumbersome and can
make it challenging to match the performance of using the percpu_ref
directly.
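
[Editor's note: as a rough illustration of the limit described above (not part of the commit message): with 4 KiB pages, a counter that saturates at 2^31 references covers only 8 TiB of memory. A minimal userspace sketch of the arithmetic:]

	#include <stdio.h>

	int main(void)
	{
		unsigned long max_refs = 1UL << 31;      /* int-based percpu_ref limit */
		unsigned long bytes    = max_refs << 12; /* assuming 4 KiB pages */

		/* prints "limit: 8 TiB" -- within reach of large 64-bit systems */
		printf("limit: %lu TiB\n", bytes >> 40);
		return 0;
	}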
While using ints for the percpu counters makes them pack tighter on
64-bit machines, the possible gain from using ints instead of longs is
extremely small compared to the overall gain from per-cpu operation.
This patch makes percpu_ref based on longs so that it can be used to
directly count memory objects or pages.
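
[Editor's note: the heart of the change is the bias constant, quoted from the diff below. The bias keeps the atomic counter from reaching zero while per-CPU refs are still un-summed, and widening it to a long moves the limit from bit 31 to the top bit of the machine word:]

	/* before: counter is an atomic_t, range capped at 2^31 */
	#define PCPU_COUNT_BIAS	(1U << 31)

	/* after: counter is an atomic_long_t, range scales with BITS_PER_LONG */
	#define PCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))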
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r-- | lib/percpu-refcount.c | 37
1 file changed, 19 insertions, 18 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 70d28c9..559ee0b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -25,15 +25,15 @@
  * works.
  *
  * Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS		(1U << 31)
+#define PCPU_COUNT_BIAS		(1LU << (BITS_PER_LONG - 1))
 
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 {
-	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
 }
 
 /**
@@ -43,7 +43,7 @@ static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
  * @gfp: allocation mask to use
  *
  * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * analagous to atomic_long_set(ref, 1).
  *
  * Note that @release must not sleep - it may potentially be called from RCU
  * callback context by percpu_ref_kill().
@@ -51,9 +51,9 @@
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 		    gfp_t gfp)
 {
-	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned, gfp);
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
 	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
@@ -75,13 +75,13 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
 	BUG_ON(!pcpu_count);
 	WARN_ON(!percpu_ref_is_zero(ref));
 
-	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired with
@@ -109,7 +109,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
 		free_percpu(pcpu_count);
@@ -121,14 +121,15 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
-	unsigned count = 0;
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+	pr_debug("global %ld pcpu %ld",
+		 atomic_long_read(&ref->count), (long)count);
 
 	/*
 	 * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -143,11 +144,11 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * time is equivalent and saves us atomic operations:
 	 */
 
-	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
 
-	WARN_ONCE(atomic_read(&ref->count) <= 0,
-		  "percpu ref (%pf) <= 0 (%i) after killed",
-		  ref->release, atomic_read(&ref->count));
+	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+		  "percpu ref (%pf) <= 0 (%ld) after killed",
+		  ref->release, atomic_long_read(&ref->count));
 
 	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
 	if (ref->confirm_kill)
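
[Editor's note: for context, a minimal hypothetical user of the API touched by this patch might look like the sketch below. struct my_obj, my_obj_release(), and my_obj_create() are invented for illustration; percpu_ref_init()'s three-argument signature matches the diff above.]

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		struct percpu_ref ref;
	};

	/* called once the count finally drops to zero after percpu_ref_kill() */
	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		kfree(obj);
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;

		/* starts in percpu mode with a count of 1, as in the diff above */
		if (percpu_ref_init(&obj->ref, my_obj_release, GFP_KERNEL)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

Hot paths would then use percpu_ref_get(&obj->ref) / percpu_ref_put(&obj->ref), and teardown would call percpu_ref_kill(&obj->ref); my_obj_release() runs once all outstanding references -- after this patch, a long's worth of them -- have been dropped.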