author     Tejun Heo <tj@kernel.org>            2009-04-23 11:05:19 +0900
committer  Jens Axboe <jens.axboe@oracle.com>   2009-04-28 07:37:35 +0200
commit     40cbbb781d3eba5d6ac0860db078af490e5c7c6b (patch)
tree       dec374543cf045fc630bccddbb7646c695094b0d /block
parent     b243ddcbe9be146172baa544dadecebf156eda0e (diff)
block: implement and use [__]blk_end_request_all()
There are many [__]blk_end_request() call sites which call it with the full request length and expect full completion. Many of them ensure that the request actually completes by doing a BUG_ON() on the return value, which is awkward and error-prone.

This patch adds [__]blk_end_request_all(), which takes @rq and @error and fully completes the request. A BUG_ON() is added to ensure that this actually happens.

Most conversions are simple, but a few are noteworthy:

* cdrom/viocd: viocd_end_request() replaced with direct calls to __blk_end_request_all().

* s390/block/dasd: dasd_end_request() replaced with direct calls to __blk_end_request_all().

* s390/char/tape_block: tapeblock_end_request() replaced with direct calls to blk_end_request_all().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Miller <mike.miller@hp.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
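For reference, a minimal sketch of what such a helper amounts to, derived purely from the open-coded pattern being removed in the hunks below; the actual in-tree definitions are not part of this diff and may differ in detail (e.g. bidi request handling):

    /*
     * Sketch only, not the in-tree definition: fully complete @rq with
     * @error.  blk_rq_bytes(rq) is the remaining length, so passing it to
     * __blk_end_request() completes the whole request; a non-zero return
     * (leftover bytes) would indicate a driver bug, hence the BUG().
     */
    static inline void __blk_end_request_all(struct request *rq, int error)
    {
    	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
    		BUG();
    }

Callers that previously open-coded this pattern (and the blk_end_request() variant for the non-__ case) can then collapse to a single call, as the conversions below show.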
Diffstat (limited to 'block')
-rw-r--r--   block/blk-barrier.c   9
-rw-r--r--   block/blk-core.c      2
-rw-r--r--   block/elevator.c      2
3 files changed, 4 insertions, 9 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111..c8d0876 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
*/
q->ordseq = 0;
rq = q->orig_bar_rq;
-
- if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
- BUG();
-
+ __blk_end_request_all(rq, q->orderr);
return true;
}
@@ -252,9 +249,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
* with prejudice.
*/
elv_dequeue_request(q, rq);
- if (__blk_end_request(rq, -EOPNOTSUPP,
- blk_rq_bytes(rq)))
- BUG();
+ __blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index b84250d..0520cc7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1780,7 +1780,7 @@ struct request *elv_next_request(struct request_queue *q)
break;
} else if (ret == BLKPREP_KILL) {
rq->cmd_flags |= REQ_QUIET;
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ __blk_end_request_all(rq, -EIO);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
diff --git a/block/elevator.c b/block/elevator.c
index b03b875..1af5d9f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -810,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ __blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);