author    Shaohua Li <shli@fusionio.com>    2013-10-24 09:06:45 +0100
committer Jens Axboe <axboe@kernel.dk>     2013-10-25 11:55:59 +0100
commit    098faf5805c80f951ce5e8b4a6842382ad793c38 (patch)
tree      7479b126910fc14f0b1690b9414c8bf9033dce70 /lib
parent    71fe07d040626de7b72244bf6de889c2e0f5aea3 (diff)
percpu_counter: make APIs irq safe
In my usage, the percpu counter APIs are sometimes called with interrupts disabled and sometimes not, and lockdep complains about a potential deadlock. Let's always take the percpu_counter lock in an irq-safe way. There should be no performance penalty, as all of these are slow paths.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
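For readers unfamiliar with the deadlock lockdep warns about here, a minimal illustrative sketch follows (not part of the patch; my_counter, reset_stats and my_irq_handler are hypothetical names). If process context takes fbc->lock without disabling interrupts, and an interrupt arriving on the same CPU enters a percpu_counter path that also wants fbc->lock, the CPU spins forever: the handler cannot make progress until the lock is released, and the lock holder cannot run until the handler returns.

#include <linux/percpu_counter.h>
#include <linux/interrupt.h>

static struct percpu_counter my_counter;	/* hypothetical counter */

/* Process context: percpu_counter_set() takes fbc->lock. Before this
 * patch it used raw_spin_lock(), leaving interrupts enabled while the
 * lock was held. */
static void reset_stats(void)
{
	percpu_counter_set(&my_counter, 0);
}

/* Hard-irq context: __percpu_counter_add() takes fbc->lock whenever the
 * local delta crosses the batch threshold. If this handler fires on the
 * CPU that is currently inside percpu_counter_set() above, it spins on
 * a lock its own CPU already holds: deadlock. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	percpu_counter_add(&my_counter, 1);
	return IRQ_HANDLED;
}

With the irqsave variants introduced below, interrupts are disabled for the duration of these short critical sections, so an interrupt handler can never preempt a lock holder on its own CPU; since the lock is only taken on slow paths, the cost is negligible, as the commit message notes.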
Diffstat (limited to 'lib')
-rw-r--r--  lib/percpu_counter.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5e..7473ee3 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		raw_spin_lock(&fbc->lock);
+		unsigned long flags;
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
-		raw_spin_unlock(&fbc->lock);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 		__this_cpu_write(*fbc->counters, 0);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
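A note on why the patch uses the _irqsave/_irqrestore variants rather than plain raw_spin_lock_irq()/raw_spin_unlock_irq(): per the commit message, these APIs are called both with and without interrupts disabled. The plain _irq pair unconditionally re-enables interrupts at unlock, which would be wrong for a caller that entered with them already disabled; the irqsave pair snapshots and restores the caller's interrupt state. A generic sketch of the difference (illustrative only; some_lock is a hypothetical raw spinlock):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(some_lock);	/* hypothetical lock */

static void irq_variant(void)
{
	/* Unlock unconditionally re-enables interrupts: broken if the
	 * caller already had them disabled. */
	raw_spin_lock_irq(&some_lock);
	/* ... critical section ... */
	raw_spin_unlock_irq(&some_lock);
}

static void irqsave_variant(void)
{
	unsigned long flags;

	/* 'flags' snapshots the caller's interrupt state; unlock
	 * restores exactly that state, so this is correct whether or
	 * not the caller had interrupts disabled. */
	raw_spin_lock_irqsave(&some_lock, flags);
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&some_lock, flags);
}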