Diffstat (limited to 'lib')
-rw-r--r--  lib/flex_proportions.c |  8
-rw-r--r--  lib/percpu-refcount.c  | 43
-rw-r--r--  lib/percpu_counter.c   | 20
-rw-r--r--  lib/proportions.c      | 10
4 files changed, 45 insertions, 36 deletions
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac..8f25652 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
*/
#include <linux/flex_proportions.h>
-int fprop_global_init(struct fprop_global *p)
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
- err = percpu_counter_init(&p->events, 1);
+ err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
- err = percpu_counter_init(&pl->events, 0);
+ err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
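
The flex_proportions change simply threads a caller-supplied gfp mask down to percpu_counter_init(). Below is a minimal sketch of how a caller might use the widened initializers; struct my_domain, my_domain_init() and the error-handling shape are hypothetical and not part of this patch.

/*
 * Hedged sketch (not part of the patch): a hypothetical container that
 * forwards its caller's allocation mask to the flex_proportions API as
 * changed above.
 */
#include <linux/flex_proportions.h>
#include <linux/gfp.h>

struct my_domain {                              /* hypothetical */
        struct fprop_global completions;
        struct fprop_local_percpu events;
};

static int my_domain_init(struct my_domain *dom, gfp_t gfp)
{
        int err;

        /* The caller now chooses the allocation context, e.g. GFP_KERNEL. */
        err = fprop_global_init(&dom->completions, gfp);
        if (err)
                return err;

        err = fprop_local_init_percpu(&dom->events, gfp);
        if (err)
                fprop_global_destroy(&dom->completions);

        return err;
}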
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a89cf09..c6c31e2 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -25,33 +25,35 @@
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
-#define PCPU_COUNT_BIAS (1U << 31)
+#define PCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
- return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
+ * @gfp: allocation mask to use
*
* Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * analagous to atomic_long_set(ref, 1).
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
-int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
+ gfp_t gfp)
{
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
- ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+ ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
if (!ref->pcpu_count_ptr)
return -ENOMEM;
@@ -73,13 +75,13 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
int cpu;
BUG_ON(!pcpu_count);
WARN_ON(!percpu_ref_is_zero(ref));
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
/*
* Restore per-cpu operation. smp_store_release() is paired with
@@ -107,7 +109,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
if (pcpu_count) {
free_percpu(pcpu_count);
@@ -119,14 +121,15 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- unsigned count = 0;
+ unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(pcpu_count, cpu);
- pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+ pr_debug("global %ld pcpu %ld",
+ atomic_long_read(&ref->count), (long)count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -141,10 +144,11 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
* time is equivalent and saves us atomic operations:
*/
- atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+ atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
- WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
- atomic_read(&ref->count));
+ WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+ "percpu ref (%pf) <= 0 (%ld) after killed",
+ ref->release, atomic_long_read(&ref->count));
/* @ref is viewed as dead on all CPUs, send out kill confirmation */
if (ref->confirm_kill)
@@ -176,7 +180,8 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
- "percpu_ref_kill() called more than once!\n");
+ "percpu_ref_kill() called more than once on %pf!",
+ ref->release);
ref->pcpu_count_ptr |= PCPU_REF_DEAD;
ref->confirm_kill = confirm_kill;
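
Besides widening the refcount to atomic_long_t, percpu_ref_init() now takes a gfp mask for the percpu allocation. The sketch below shows a hypothetical caller of the new three-argument form; struct my_object, my_object_release() and my_object_create() are illustrative names only.

/*
 * Hedged sketch (not part of the patch): hypothetical user of the
 * gfp-aware percpu_ref_init().
 */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_object {                              /* hypothetical */
        struct percpu_ref ref;
};

static void my_object_release(struct percpu_ref *ref)
{
        /* Must not sleep: may run from RCU callback context. */
        kfree(container_of(ref, struct my_object, ref));
}

static struct my_object *my_object_create(gfp_t gfp)
{
        struct my_object *obj = kmalloc(sizeof(*obj), gfp);

        if (!obj)
                return NULL;

        /* The percpu counter is allocated with the caller's @gfp. */
        if (percpu_ref_init(&obj->ref, my_object_release, gfp)) {
                kfree(obj);
                return NULL;
        }
        return obj;
}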
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577..48144cd 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
+ unsigned long flags __maybe_unused;
+
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
+ fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_add(&fbc->list, &percpu_counters);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
+ unsigned long flags __maybe_unused;
+
if (!fbc->counters)
return;
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_del(&fbc->list);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- spin_lock(&percpu_counters_lock);
+ spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irq(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
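
With the percpu_counter changes, the gfp mask passed to percpu_counter_init() is forwarded to alloc_percpu_gfp(), and percpu_counters_lock is now taken IRQ-safely so counters can be created and destroyed with interrupts disabled. A minimal, hypothetical caller (nr_widgets and widgets_setup() are made-up names) might look like this:

/*
 * Hedged sketch (not part of the patch): illustrates the new
 * initializer signature only.
 */
#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;        /* hypothetical counter */

static int widgets_setup(void)
{
        /*
         * The third argument selects the allocation context; callers that
         * cannot sleep could pass a non-sleeping mask such as GFP_NOWAIT
         * instead of GFP_KERNEL.
         */
        return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
}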
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df848..6f72429 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
{
int err;
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
pd->index = 0;
pd->pg[0].shift = shift;
mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0);
+ err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
if (err)
goto out;
- err = percpu_counter_init(&pd->pg[1].events, 0);
+ err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
if (err)
percpu_counter_destroy(&pd->pg[0].events);
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int prop_local_init_percpu(struct prop_local_percpu *pl)
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
{
raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
+ return percpu_counter_init(&pl->events, 0, gfp);
}
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
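
The classic proportions code gets the same treatment: both prop_descriptor_init() and prop_local_init_percpu() now forward a caller-chosen gfp mask to percpu_counter_init(). A hypothetical caller is sketched below; my_completions, my_events and my_prop_init() are illustrative names, and the shift value is an arbitrary example.

/*
 * Hedged sketch (not part of the patch): illustrative use of the
 * gfp-aware proportions initializers.
 */
#include <linux/gfp.h>
#include <linux/proportions.h>

static struct prop_descriptor my_completions;   /* hypothetical */
static struct prop_local_percpu my_events;      /* hypothetical */

static int my_prop_init(void)
{
        int err;

        /* Both initializers now take the allocation mask explicitly. */
        err = prop_descriptor_init(&my_completions, 6, GFP_KERNEL);
        if (err)
                return err;

        return prop_local_init_percpu(&my_events, GFP_KERNEL);
}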