author | Tejun Heo <tj@kernel.org> | 2011-12-14 00:33:39 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2011-12-14 00:33:39 +0100
commit | b50b636bce6293fa858cc7ff6c3ffe4920d90006 (patch)
tree | 65a4c509ff491aa89bf4d1a5f3e26d600eeea49f /block/cfq-iosched.c
parent | b9a1920837bc53430d339380e393a6e4c372939f (diff)
block, cfq: kill ioc_gone
Now that cic's are immediately unlinked under both locks, there's no
need to count and drain cic's before module unload; RCU callback
completion is now waited for with rcu_barrier() instead.

While at it, remove the residual RCU operations on cic_list.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
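
[Annotation] As context for the patch below, here is a minimal out-of-tree sketch of the pattern this commit adopts (the module, cache, and struct names are hypothetical, not from this patch): objects whose release is deferred through call_rcu() can be drained at module unload with a single rcu_barrier(), so no hand-rolled per-cpu counter and completion are needed.

/*
 * Hypothetical module illustrating the pattern this commit adopts:
 * objects are freed through call_rcu(), and module unload uses
 * rcu_barrier() to wait for every pending callback before the
 * backing slab cache is destroyed.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_obj {			/* stand-in for cfq_io_context */
	int payload;
	struct rcu_head rcu_head;	/* embedded, like cic->rcu_head */
};

static struct kmem_cache *demo_pool;	/* stand-in for cfq_ioc_pool */

static void demo_free_rcu(struct rcu_head *head)
{
	/* mirrors the simplified cfq_cic_free_rcu() */
	kmem_cache_free(demo_pool,
			container_of(head, struct demo_obj, rcu_head));
}

static void demo_free(struct demo_obj *obj)
{
	/* defer the actual free past the current RCU grace period */
	call_rcu(&obj->rcu_head, demo_free_rcu);
}

static int __init demo_init(void)
{
	struct demo_obj *obj;

	demo_pool = KMEM_CACHE(demo_obj, 0);
	if (!demo_pool)
		return -ENOMEM;

	/* exercise the release path once so a callback is in flight */
	obj = kmem_cache_alloc(demo_pool, GFP_KERNEL);
	if (obj)
		demo_free(obj);
	return 0;
}

static void __exit demo_exit(void)
{
	rcu_barrier();			/* wait for all demo_free_rcu() callbacks */
	kmem_cache_destroy(demo_pool);	/* now safe: no callback can touch the cache */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that synchronize_rcu() would not be enough here: it waits only for a grace period to elapse, not for already-queued call_rcu() callbacks to finish running. rcu_barrier() waits for the callbacks themselves, which is exactly what the removed counter/completion machinery guaranteed by hand.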
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 43
1 file changed, 5 insertions(+), 38 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ff44435..ae7791a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -62,10 +62,6 @@ static const int cfq_hist_divisor = 4;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
-static struct completion *ioc_gone;
-static DEFINE_SPINLOCK(ioc_gone_lock);
-
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -2671,26 +2667,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 static void cfq_cic_free_rcu(struct rcu_head *head)
 {
-	struct cfq_io_context *cic;
-
-	cic = container_of(head, struct cfq_io_context, rcu_head);
-
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(cfq_ioc_count);
-
-	if (ioc_gone) {
-		/*
-		 * CFQ scheduler is exiting, grab exit lock and check
-		 * the pending io context count. If it hits zero,
-		 * complete ioc_gone and set it back to NULL
-		 */
-		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
-			complete(ioc_gone);
-			ioc_gone = NULL;
-		}
-		spin_unlock(&ioc_gone_lock);
-	}
+	kmem_cache_free(cfq_ioc_pool,
+			container_of(head, struct cfq_io_context, rcu_head));
 }
 
 static void cfq_cic_free(struct cfq_io_context *cic)
@@ -2705,7 +2683,7 @@ static void cfq_release_cic(struct cfq_io_context *cic)
 
 	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
-	hlist_del_rcu(&cic->cic_list);
+	hlist_del(&cic->cic_list);
 	cfq_cic_free(cic);
 }
 
@@ -2782,7 +2760,6 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->exit = cfq_exit_cic;
 		cic->release = cfq_release_cic;
-		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -3072,7 +3049,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	ret = radix_tree_insert(&ioc->radix_root, q->id, cic);
 	if (likely(!ret)) {
-		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		hlist_add_head(&cic->cic_list, &ioc->cic_list);
 		list_add(&cic->queue_list, &cfqd->cic_list);
 		cic = NULL;
 	} else if (ret == -EEXIST) {
@@ -4156,19 +4133,9 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
-	DECLARE_COMPLETION_ONSTACK(all_gone);
 	blkio_policy_unregister(&blkio_policy_cfq);
 	elv_unregister(&iosched_cfq);
-	ioc_gone = &all_gone;
-	/* ioc_gone's update must be visible before reading ioc_count */
-	smp_wmb();
-
-	/*
-	 * this also protects us from entering cfq_slab_kill() with
-	 * pending RCU callbacks
-	 */
-	if (elv_ioc_count_read(cfq_ioc_count))
-		wait_for_completion(&all_gone);
+	rcu_barrier();	/* make sure all cic RCU frees are complete */
	cfq_slab_kill();
 }
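
[Annotation] The collapsed cfq_cic_free_rcu() above leans entirely on container_of() to recover the enclosing cfq_io_context from its embedded rcu_head. A standalone userspace sketch of that idiom, with a hypothetical struct in place of cfq_io_context:

#include <stddef.h>
#include <stdio.h>

/* userspace rendition of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {			/* shape of the kernel struct */
	void (*func)(struct rcu_head *);
};

struct demo_ctx {			/* hypothetical stand-in for cfq_io_context */
	int key;
	struct rcu_head rcu_head;	/* embedded callback handle */
};

static void demo_free_rcu(struct rcu_head *head)
{
	/* recover the enclosing object from a pointer to its member */
	struct demo_ctx *ctx = container_of(head, struct demo_ctx, rcu_head);

	printf("freeing ctx with key %d\n", ctx->key);
}

int main(void)
{
	struct demo_ctx ctx = { .key = 42 };

	demo_free_rcu(&ctx.rcu_head);	/* prints: freeing ctx with key 42 */
	return 0;
}

This is why the RCU callback can receive only the rcu_head pointer and still free the whole object in one kmem_cache_free() call, making the old temporary cic variable unnecessary.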