Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/support/ktrace.c | 22 ++++++++++++++--------
-rw-r--r--   fs/xfs/support/ktrace.h |  1 +
2 files changed, 15 insertions, 8 deletions
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 4e0444c..0b75d30 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -24,7 +24,7 @@ static int ktrace_zentries;
 void __init
 ktrace_init(int zentries)
 {
-	ktrace_zentries = zentries;
+	ktrace_zentries = roundup_pow_of_two(zentries);
 
 	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
 					"ktrace_hdr");
@@ -47,13 +47,16 @@ ktrace_uninit(void)
  * ktrace_alloc()
  *
  * Allocate a ktrace header and enough buffering for the given
- * number of entries.
+ * number of entries. Round the number of entries up to a
+ * power of 2 so we can do fast masking to get the index from
+ * the atomic index counter.
  */
 ktrace_t *
 ktrace_alloc(int nentries, unsigned int __nocast sleep)
 {
 	ktrace_t	*ktp;
 	ktrace_entry_t	*ktep;
+	int		entries;
 
 	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
 
@@ -70,11 +73,12 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 	/*
 	 * Special treatment for buffers with the ktrace_zentries entries
 	 */
-	if (nentries == ktrace_zentries) {
+	entries = roundup_pow_of_two(nentries);
+	if (entries == ktrace_zentries) {
 		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
 							    sleep);
 	} else {
-		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+		ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
 							    sleep | KM_LARGE);
 	}
 
@@ -91,7 +95,9 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 	}
 
 	ktp->kt_entries  = ktep;
-	ktp->kt_nentries = nentries;
+	ktp->kt_nentries = entries;
+	ASSERT(is_power_of_2(entries));
+	ktp->kt_index_mask = entries - 1;
 	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
@@ -160,7 +166,7 @@ ktrace_enter(
 	 * Grab an entry by pushing the index up to the next one.
 	 */
 	index = atomic_add_return(1, &ktp->kt_index);
-	index = (index - 1) % ktp->kt_nentries;
+	index = (index - 1) & ktp->kt_index_mask;
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
 
@@ -197,7 +203,7 @@ ktrace_nentries(
 	if (ktp == NULL)
 		return 0;
 
-	index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
 	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
@@ -223,7 +229,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
 	else
 		index = 0;
 
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
index 782dbbb..741d694 100644
--- a/fs/xfs/support/ktrace.h
+++ b/fs/xfs/support/ktrace.h
@@ -31,6 +31,7 @@ typedef struct ktrace_entry {
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
 	atomic_t	kt_index;	/* current index in entries */
+	unsigned int	kt_index_mask;
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;
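
The patch hinges on one identity: when a buffer holds a power-of-two number of entries, index % size equals index & (size - 1), so the per-event modulo in ktrace_enter() becomes a single AND against the precomputed kt_index_mask. The sketch below is a minimal userspace illustration of that idea only; round_up_pow2() is an illustrative stand-in for the kernel's roundup_pow_of_two(), and none of it is the actual ktrace code.

/* Sketch: power-of-two rounding plus mask-based ring-buffer indexing. */
#include <assert.h>
#include <stdio.h>

/* Round a nonzero 32-bit value up to the next power of two
 * (same effect as the kernel helper, written portably). */
static unsigned int round_up_pow2(unsigned int x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int main(void)
{
	unsigned int nentries = 1000;                    /* requested size */
	unsigned int entries  = round_up_pow2(nentries); /* rounded to 1024 */
	unsigned int mask     = entries - 1;             /* 0x3ff */
	unsigned int counter;

	/* For a power-of-two size, masking and modulo always agree. */
	for (counter = 0; counter < 5000; counter++)
		assert((counter % entries) == (counter & mask));

	printf("%u rounded up to %u, mask 0x%x\n", nentries, entries, mask);
	return 0;
}

Rounding the requested count up rather than down is what lets ktrace_alloc() keep ASSERT(is_power_of_2(entries)) trivially true without ever shrinking the trace buffer a caller asked for.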