author | Andrew Morton <akpm@osdl.org> | 2006-03-07 21:55:31 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-08 14:14:01 -0800
commit | e2bab3d92486fb781f4d06f56339264ed1492392
tree | 0295e1ed29794a8e804c4e28a3ea9e72777e533b
parent | b884e25784f62a1c740d2e4c1ce19cb89644e986
[PATCH] percpu_counter_sum()
Implement percpu_counter_sum(). This is a more accurate but slower version of
percpu_counter_read_positive().
We need this for Alex's speedup-ext3_statfs patch and for the nr_file
accounting fix; without it, those counters would be too inaccurate on
machines with large CPU counts.
Cc: Ravikiran G Thirumalai <kiran@scalex86.org>
Cc: Alex Tomas <alex@clusterfs.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
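
For orientation, a minimal sketch of how a caller might use the resulting
interface. The nr_widgets counter and the widget_* helpers are hypothetical;
only percpu_counter_mod(), percpu_counter_read_positive() and the new
percpu_counter_sum() come from this patch, with the 2.6-era signatures shown
in the diff below:

#include <linux/percpu_counter.h>

/* Hypothetical counter in the spirit of nr_file: updated constantly from
 * many CPUs, read only occasionally.  Assume it has been set up with
 * percpu_counter_init() at boot. */
static struct percpu_counter nr_widgets;

void widget_created(void)
{
	/* Hot path: accumulates into this CPU's private delta and only
	 * takes fbc->lock once the delta reaches FBC_BATCH. */
	percpu_counter_mod(&nr_widgets, 1);
}

long widgets_approx(void)
{
	/* Cheap read: ignores the unfolded per-cpu deltas, so it can lag
	 * the true count by almost FBC_BATCH per CPU. */
	return percpu_counter_read_positive(&nr_widgets);
}

long widgets_exact(void)
{
	/* Accurate but slow read -- what an ext3_statfs-style path
	 * would call. */
	return percpu_counter_sum(&nr_widgets);
}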
-rw-r--r-- | include/linux/percpu_counter.h | 6
-rw-r--r-- | mm/swap.c | 25
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index bd6708e..6825255 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -39,6 +39,7 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 
 void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+long percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline long percpu_counter_read(struct percpu_counter *fbc)
 {
@@ -92,6 +93,11 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
 	return fbc->count;
 }
 
+static inline long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	return percpu_counter_read_positive(fbc);
+}
+
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*
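
To put a number on the changelog's accuracy claim: percpu_counter_mod() lets
each CPU hold a private delta of up to FBC_BATCH - 1 before folding it into
fbc->count, so percpu_counter_read() can lag the true value by almost
FBC_BATCH per CPU. A back-of-the-envelope estimate, assuming the era's
FBC_BATCH definition of NR_CPUS*2 on boxes with NR_CPUS >= 16 (treat the
exact constants as illustrative):

/*
 * Worst-case staleness of percpu_counter_read() on a large machine,
 * assuming FBC_BATCH == NR_CPUS * 2 (illustrative, era-specific):
 *
 *	NR_CPUS           = 128
 *	FBC_BATCH         = 128 * 2       = 256
 *	max delta per CPU = FBC_BATCH - 1 = 255
 *	max total error   = 128 * 255     = 32640
 *
 * percpu_counter_sum() removes this error by taking fbc->lock and
 * folding in every CPU's delta -- accurate, but it touches one
 * cacheline per CPU, hence "more accurate but much slower".
 */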