author    Sebastian Ott <sebott@linux.vnet.ibm.com>    2013-02-28 12:07:27 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-03-07 09:52:20 +0100
commit    8360cb5f389ebd36b708978e0f776a285a2deb5a (patch)
tree      b7b9a241facfc1097297154ea1bae4ffc4fe12ad /drivers/s390
parent    f6a70a07079518280022286a1dceb797d12e1edf (diff)
s390/scm_blk: fix request number accounting
If a block device driver cannot fetch all requests from the block layer, it is the driver's responsibility to call the request function again at a later time. Normally this is done after the next interrupt for the underlying device has been handled. However, in situations where there is no outstanding request, we have to schedule the request function ourselves; whether that is necessary is determined using an internal counter of requests issued to the hardware. In some cases where a request is given back to the block layer unhandled, this count of queued requests was not adjusted. Fix this class of failures by adjusting queued_reqs in all functions used to give a request back to the block layer.

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
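To make the accounting invariant concrete, here is a minimal sketch in plain userspace C (all names are hypothetical and C11 atomics stand in for the kernel's atomic_t, so this is an illustration of the pattern, not driver code): the counter is incremented exactly once when a request is handed to the hardware, and every path that gives the request back to the block layer, whether it completes or requeues it, must decrement the counter again.

/*
 * queued_reqs counts requests issued to the hardware.  Every path
 * that returns a request to the block layer decrements it, so a
 * reading of zero means a delayed queue restart must be scheduled.
 */
#include <assert.h>
#include <stdatomic.h>

static atomic_int queued_reqs;

/* Request leaves the driver towards the device. */
static void issue_to_hardware(void)
{
        atomic_fetch_add(&queued_reqs, 1);
}

/* Request completed; handed back to the block layer. */
static void finish_request(void)
{
        atomic_fetch_sub(&queued_reqs, 1);
}

/* Request given back unhandled.  Before the fix, paths like this
 * missed the decrement, leaving the counter permanently elevated. */
static void requeue_request(void)
{
        atomic_fetch_sub(&queued_reqs, 1);
}

int main(void)
{
        issue_to_hardware();
        requeue_request();      /* e.g. starting the I/O failed */

        issue_to_hardware();
        finish_request();

        /* Nothing is in flight, so the counter must read zero again. */
        assert(atomic_load(&queued_reqs) == 0);
        return 0;
}

A decrement dropped in any one of these paths leaves the counter too high forever, which is exactly the failure class the patch fixes.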
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/scm_blk.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9978ad4..d9c7e94 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -195,14 +195,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
         scm_release_cluster(scmrq);
         blk_requeue_request(bdev->rq, scmrq->request);
+        atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
         scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+        struct scm_blk_dev *bdev = scmrq->bdev;
+
         scm_release_cluster(scmrq);
         blk_end_request_all(scmrq->request, scmrq->error);
+        atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
 }
@@ -231,11 +235,13 @@ static void scm_blk_request(struct request_queue *rq)
                         return;
                 }
                 if (scm_need_cluster_request(scmrq)) {
+                        atomic_inc(&bdev->queued_reqs);
                         blk_start_request(req);
                         scm_initiate_cluster_request(scmrq);
                         return;
                 }
                 scm_request_prepare(scmrq);
+                atomic_inc(&bdev->queued_reqs);
                 blk_start_request(req);
 
                 ret = scm_start_aob(scmrq->aob);
@@ -244,7 +250,6 @@ static void scm_blk_request(struct request_queue *rq)
                         scm_request_requeue(scmrq);
                         return;
                 }
-                atomic_inc(&bdev->queued_reqs);
         }
 }
 
@@ -310,7 +315,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                 }
 
                 scm_request_finish(scmrq);
-                atomic_dec(&bdev->queued_reqs);
                 spin_lock_irqsave(&bdev->lock, flags);
         }
         spin_unlock_irqrestore(&bdev->lock, flags);
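For context on why a stale counter stalls the queue: the requeue path ends in scm_ensure_queue_restart(), which in the scm_blk.c of this era looks roughly like the sketch below (paraphrased from the surrounding driver code, not part of this patch). While queued_reqs is non-zero the driver relies on the next interrupt to restart the queue; only at zero does it schedule a delayed queue run.

/* Approximate shape of the restart helper in scm_blk.c: schedule a
 * delayed run of the request queue only if nothing is in flight. */
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
        if (atomic_read(&bdev->queued_reqs)) {
                /* Queue restart is handled by the next interrupt. */
                return;
        }
        blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

A request counter that was incremented but never decremented therefore suppresses the delayed restart, and a request given back to the block layer unhandled is never fetched again.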