summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFan Du <fan.du@windriver.com>2013-07-03 15:05:19 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-03 16:07:43 -0700
commit64df3071a97f20767f63b88c573791691a855b5c (patch)
treed7f8de21c425465f6e790c5de74d2dc54214b9e3
parent5cb0656b62ff1199763764e4f6b4c06d30d5d0f5 (diff)
downloadop-kernel-dev-64df3071a97f20767f63b88c573791691a855b5c.zip
op-kernel-dev-64df3071a97f20767f63b88c573791691a855b5c.tar.gz
lib/percpu_counter.c: __this_cpu_write() doesn't need to be protected by spinlock
__this_cpu_write doesn't need to be protected by spinlock, as we are doing a per-cpu write with preemption disabled. Another reason to move __this_cpu_write outside of the spinlock: __percpu_counter_sum is not an accurate counter anyway. Signed-off-by: Fan Du <fan.du@windriver.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--lib/percpu_counter.c2
1 files changed, 1 insertions, 1 deletions
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ba6085d..1fc23a3 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -80,8 +80,8 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
if (count >= batch || count <= -batch) {
raw_spin_lock(&fbc->lock);
fbc->count += count;
- __this_cpu_write(*fbc->counters, 0);
raw_spin_unlock(&fbc->lock);
+ __this_cpu_write(*fbc->counters, 0);
} else {
__this_cpu_write(*fbc->counters, count);
}
OpenPOWER on IntegriCloud