summaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2014-04-07 15:39:42 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-07 16:36:14 -0700
commit88da03a67674bcd6e9ecf18a0a182cf1303056ba (patch)
treef822c90ff7a8e62e6d7c3c82854b510786e7222b /mm/slub.c
parent3ed66e910c91eb914b5c1f2d434538fe68bb8a56 (diff)
downloadop-kernel-dev-88da03a67674bcd6e9ecf18a0a182cf1303056ba.zip
op-kernel-dev-88da03a67674bcd6e9ecf18a0a182cf1303056ba.tar.gz
slub: use raw_cpu_inc for incrementing statistics
Statistics are not critical to the operation of the allocator but should also not cause too much overhead. When __this_cpu_inc() is altered to check whether preemption is disabled, that check triggers here. Use raw_cpu_inc() to avoid the checks. Using this_cpu ops may cause interrupt disable/enable sequences on various arches, which may significantly impact allocator performance.

[akpm@linux-foundation.org: add comment]
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index e745186..f620bbf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -224,7 +224,11 @@ static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
- __this_cpu_inc(s->cpu_slab->stat[si]);
+ /*
+ * The rmw is racy on a preemptible kernel but this is acceptable, so
+ * avoid this_cpu_add()'s irq-disable overhead.
+ */
+ raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
OpenPOWER on IntegriCloud