| author | Hannes Reinecke <hare@suse.de> | 2013-01-30 09:26:14 +0000 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2013-07-01 17:31:20 +0200 |
| commit | a2ace46632fb38c7a3771f2f0d235a4295e83bcf (patch) | |
| tree | 3d69366b49c18967fda51cb6b0355148f91f7d6f /drivers/s390/block/dasd.c | |
| parent | 1fbdb8be9bfc91efd45720493c7ecae884ae22bd (diff) | |
s390/dasd: Implement block timeout handling
This patch implements generic block layer timeout handling
callbacks for DASDs. When the timeout expires, the respective
cqr is aborted.
With this timeout handler, a time-critical request abort is
guaranteed, because the abort does not depend on the internal
state of the various DASD driver queues.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Acked-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
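For context (not part of this patch, whose diffstat below is limited to dasd.c): on the pre-blk-mq block layer of this era, a driver supplies the timeout callback and a per-request deadline when it sets up its request queue; without both, the handler added here is never invoked. A minimal, hedged sketch of that wiring follows. The helper name example_dasd_setup_timeout and the 30-second value are illustrative assumptions, not taken from this commit, and where the DASD driver actually performs this registration is not shown in this diff.

```c
#include <linux/blkdev.h>

/* Handler added by this patch; normally declared in dasd_int.h. */
enum blk_eh_timer_return dasd_times_out(struct request *req);

/*
 * Hypothetical sketch: registering a legacy (pre-blk-mq) block layer
 * timeout callback on a request queue.  The function name and the
 * 30-second deadline are examples only.
 */
static void example_dasd_setup_timeout(struct request_queue *q)
{
	/* called by the block layer when a started request's timer expires */
	blk_queue_rq_timed_out(q, dasd_times_out);
	/* per-request deadline; left at zero, the timer never fires */
	blk_queue_rq_timeout(q, 30 * HZ);
}
```

The diff itself therefore only supplies the handler and the req->completion_data linkage it relies on to find the matching cqr.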
Diffstat (limited to 'drivers/s390/block/dasd.c')
-rw-r--r-- | drivers/s390/block/dasd.c | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 000e5140..87478be 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2573,8 +2573,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		 */
 		cqr->callback_data = (void *) req;
 		cqr->status = DASD_CQR_FILLED;
+		req->completion_data = cqr;
 		blk_start_request(req);
 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		INIT_LIST_HEAD(&cqr->devlist);
 		dasd_profile_start(block, cqr, req);
 	}
 }
@@ -2862,6 +2864,80 @@ static void do_dasd_request(struct request_queue *queue)
 }
 
 /*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ * by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+	struct dasd_ccw_req *cqr = req->completion_data;
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	int rc = 0;
+
+	if (!cqr)
+		return BLK_EH_NOT_HANDLED;
+
+	device = cqr->startdev ? cqr->startdev : block->base;
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		spin_unlock(get_ccwdev_lock(device->cdev));
+		rc = dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				spin_unlock(get_ccwdev_lock(device->cdev));
+				rc = dasd_cancel_req(searchcqr);
+				spin_lock(get_ccwdev_lock(device->cdev));
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of queue
+				 */
+				continue;
+			}
+			break;
+		}
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
+/*
  * Allocate and initialize request queue and default I/O scheduler.
  */
 static int dasd_alloc_queue(struct dasd_block *block)
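One detail worth pulling out of the DASD_CQR_IN_ERP branch above: ERP (error recovery) requests are linked back to the request they were built for through their ->refers pointer, possibly over several levels, so the handler walks each queued request's chain to its root to decide whether it belongs to the timed-out cqr. A minimal restatement of that match follows; the helper name is hypothetical and the sketch is not part of the patch, it only isolates the pattern used inside the loop.

```c
#include "dasd_int.h"	/* struct dasd_ccw_req and its ->refers member */

/*
 * Hypothetical helper restating the ->refers walk from dasd_times_out():
 * follow the ERP chain back to its root and check whether it ends at the
 * timed-out cqr.
 */
static int example_erp_belongs_to(struct dasd_ccw_req *erp_cqr,
				  struct dasd_ccw_req *cqr)
{
	struct dasd_ccw_req *tmp = erp_cqr;

	/* each ERP request refers to the request it recovers */
	while (tmp->refers)
		tmp = tmp->refers;

	return tmp == cqr;
}
```

Only the matching ERP request is then cancelled or terminated, and the ccwdev lock is dropped around dasd_cancel_req() just as it is for the original cqr.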