author     Tejun Heo <tj@kernel.org>       2012-02-15 09:45:52 +0100
committer  Jens Axboe <axboe@kernel.dk>    2012-02-15 09:45:52 +0100
commit     2274b029f640cd652ab59c363e5beebf5f50e609 (patch)
tree       652a2e774c537b83cd70481a936f5c7485436491 /block/blk-ioc.c
parent     d705ae6b133f9f6a8beee617b1224b6a5c99c5da (diff)
block: simplify ioc_release_fn()
The reverse double-lock dancing in ioc_release_fn() can be simplified by
using trylock on the queue_lock and backing out of the ioc lock when the
trylock fails. Simplify it accordingly.
Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
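Below is a minimal user-space sketch of the back-out pattern the patch switches to, for illustration only: it uses pthread mutexes and hypothetical names (release_all(), items_left, outer, inner) in place of the kernel's spinlocks, ioc->lock, q->queue_lock and the icq list handled in the diff further down.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays the role of q->queue_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays the role of ioc->lock */
static int items_left = 3;                                 /* plays the role of ioc->icq_list */

static void release_all(void)
{
	pthread_mutex_lock(&inner);

	while (items_left > 0) {
		/*
		 * The canonical order is outer -> inner, so blocking on
		 * outer while holding inner could deadlock.  Trylock
		 * instead and, on failure, back out of inner so the path
		 * that already holds outer can make progress, then retry.
		 */
		if (pthread_mutex_trylock(&outer) == 0) {
			items_left--;	/* "release" one item */
			printf("released item, %d left\n", items_left);
			pthread_mutex_unlock(&outer);
		} else {
			pthread_mutex_unlock(&inner);
			sched_yield();	/* stands in for cpu_relax() */
			pthread_mutex_lock(&inner);
		}
	}

	pthread_mutex_unlock(&inner);
}

int main(void)
{
	release_all();
	return 0;
}

The point of the design is that on trylock failure the inner lock is dropped completely before retrying, so a path that already holds the outer lock and now wants the inner one is never blocked by the release side; the yield (cpu_relax() in the kernel code) keeps the retry loop from spinning needlessly.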
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--   block/blk-ioc.c | 46
1 file changed, 10 insertions, 36 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 811879c..f53c80e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -79,7 +79,6 @@ static void ioc_release_fn(struct work_struct *work)
 {
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
-	struct request_queue *last_q = NULL;
 	unsigned long flags;
 
 	/*
@@ -93,44 +92,19 @@ static void ioc_release_fn(struct work_struct *work)
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			/*
-			 * Need to switch to @this_q. Once we release
-			 * @ioc->lock, it can go away along with @cic.
-			 * Hold on to it.
-			 */
-			__blk_get_queue(this_q);
-
-			/*
-			 * blk_put_queue() might sleep thanks to kobject
-			 * idiocy. Always release both locks, put and
-			 * restart.
-			 */
-			if (last_q) {
-				spin_unlock(last_q->queue_lock);
-				spin_unlock_irqrestore(&ioc->lock, flags);
-				blk_put_queue(last_q);
-			} else {
-				spin_unlock_irqrestore(&ioc->lock, flags);
-			}
-
-			last_q = this_q;
-			spin_lock_irqsave(this_q->queue_lock, flags);
-			spin_lock_nested(&ioc->lock, 1);
-			continue;
+		struct request_queue *q = icq->q;
+
+		if (spin_trylock(q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
-		ioc_exit_icq(icq);
 	}
 
-	if (last_q) {
-		spin_unlock(last_q->queue_lock);
-		spin_unlock_irqrestore(&ioc->lock, flags);
-		blk_put_queue(last_q);
-	} else {
-		spin_unlock_irqrestore(&ioc->lock, flags);
-	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(iocontext_cachep, ioc);
 }