path: root/fs/xfs/support/ktrace.c
author    David Chinner <dgc@sgi.com>    2008-03-06 13:45:43 +1100
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-04-18 11:40:04 +1000
commit    d234154125197053d5215711b5df867979e55ebd (patch)
tree      366a6ff91b514a8f9846ac82926e0bbfd0a1fbed    /fs/xfs/support/ktrace.c
parent    6ee4752ffe782be6e86bea1403a2fe0f682aa71a (diff)
[XFS] Use power-of-2 sized buffers to reduce overhead
Now that the ktrace_enter() code is using atomics, the non-power-of-2
buffer sizes - which require modulus operations to get the index - are
showing up as using substantial CPU in the profiles.

Force the buffer sizes to be rounded up to the nearest power of two and
use masking rather than modulus operations to convert the index counter
to the buffer index. This reduces ktrace_enter() overhead to 8% of CPU
time, and again almost halves the trace-intensive test runtime.

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30538a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
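As a minimal user-space sketch (not part of the patch) of the identity the
change relies on: when size is a power of two, index % size is equal to
index & (size - 1), so the modulus in the hot ktrace_enter() path can be
replaced by a single AND against a precomputed mask.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int size = 64;		/* hypothetical power-of-2 buffer size */
	unsigned int mask = size - 1;	/* plays the role of kt_index_mask */
	unsigned int index;

	/* For power-of-2 sizes, masking and modulus agree everywhere. */
	for (index = 0; index < 100000; index++)
		assert((index % size) == (index & mask));

	printf("index & mask matches index %% size for all tested indices\n");
	return 0;
}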
Diffstat (limited to 'fs/xfs/support/ktrace.c')
-rw-r--r--    fs/xfs/support/ktrace.c    22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 4e0444c..0b75d30 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -24,7 +24,7 @@ static int ktrace_zentries;
void __init
ktrace_init(int zentries)
{
- ktrace_zentries = zentries;
+ ktrace_zentries = roundup_pow_of_two(zentries);
ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
"ktrace_hdr");
@@ -47,13 +47,16 @@ ktrace_uninit(void)
* ktrace_alloc()
*
* Allocate a ktrace header and enough buffering for the given
- * number of entries.
+ * number of entries. Round the number of entries up to a
+ * power of 2 so we can do fast masking to get the index from
+ * the atomic index counter.
*/
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
ktrace_t *ktp;
ktrace_entry_t *ktep;
+ int entries;
ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
@@ -70,11 +73,12 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
/*
* Special treatment for buffers with the ktrace_zentries entries
*/
- if (nentries == ktrace_zentries) {
+ entries = roundup_pow_of_two(nentries);
+ if (entries == ktrace_zentries) {
ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
sleep);
} else {
- ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+ ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
sleep | KM_LARGE);
}
@@ -91,7 +95,9 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
}
ktp->kt_entries = ktep;
- ktp->kt_nentries = nentries;
+ ktp->kt_nentries = entries;
+ ASSERT(is_power_of_2(entries));
+ ktp->kt_index_mask = entries - 1;
atomic_set(&ktp->kt_index, 0);
ktp->kt_rollover = 0;
return ktp;
@@ -160,7 +166,7 @@ ktrace_enter(
* Grab an entry by pushing the index up to the next one.
*/
index = atomic_add_return(1, &ktp->kt_index);
- index = (index - 1) % ktp->kt_nentries;
+ index = (index - 1) & ktp->kt_index_mask;
if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
ktp->kt_rollover = 1;
@@ -197,7 +203,7 @@ ktrace_nentries(
if (ktp == NULL)
return 0;
- index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+ index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
return (ktp->kt_rollover ? ktp->kt_nentries : index);
}
@@ -223,7 +229,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
int nentries;
if (ktp->kt_rollover)
- index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+ index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
else
index = 0;
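
For reference, the semantics of the roundup_pow_of_two() helper used by
the patch can be approximated in user space as follows. This is only an
illustrative sketch; the real kernel helper lives in include/linux/log2.h
and is built on ilog2(), evaluating at compile time for constants.

#include <stdio.h>

/* Illustrative approximation of the kernel's roundup_pow_of_two():
 * smallest power of two >= n (assumes n >= 1 and no overflow). */
static unsigned long roundup_pow_of_two_sketch(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	/* A caller asking for 1000 trace entries gets a 1024-entry
	 * buffer, so kt_nentries = 1024 and kt_index_mask = 1023. */
	printf("%lu\n", roundup_pow_of_two_sketch(1000));	/* prints 1024 */
	return 0;
}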