author     Sagi Grimberg <sagi@grimberg.me>    2017-07-04 18:16:58 +0300
committer  Sagi Grimberg <sagi@grimberg.me>    2017-07-06 09:48:59 +0300
commit     8d7b8fafad87c3404f72ce2d36c79c48be1129a6 (patch)
tree       c1013f5318f1b0fa84e4a0b30d5f9b67a7b2bef0 /drivers
parent     c81545f991a6612d3bdab18a71b3487023ec6b69 (diff)
download   op-kernel-dev-8d7b8fafad87c3404f72ce2d36c79c48be1129a6.zip
           op-kernel-dev-8d7b8fafad87c3404f72ce2d36c79c48be1129a6.tar.gz
nvme: kick requeue list when requeueing a request instead of when starting the queues
When we requeue a request, we can always insert it back into the scheduler at requeue time instead of doing it when restarting the queues and kicking the requeue work, so get rid of the requeue kick in nvme (core and drivers).

Also, there is now no need to start hw queues in nvme_kill_queues: we don't stop the hw queues anymore, so there is no need to start them.

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
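For context, here is a minimal, hypothetical driver-side sketch of the pattern this patch settles on; the names my_complete_rq, my_req_needs_retry and my_req_status are illustrative placeholders, not nvme code. The point is that blk_mq_requeue_request() is called with its second argument (kick_requeue_list) set to true at requeue time, so no separate blk_mq_kick_requeue_list() call is needed when the queues are later restarted.

#include <linux/blk-mq.h>

/* Hypothetical stand-ins for driver-specific helpers (not real nvme APIs). */
extern bool my_req_needs_retry(struct request *req);
extern blk_status_t my_req_status(struct request *req);

/*
 * Completion path sketch: on a retryable failure, requeue the request and
 * kick the requeue list immediately (second argument true), so the request
 * is reinserted into the scheduler right away instead of waiting for a
 * later kick from the queue-start path.
 */
static void my_complete_rq(struct request *req)
{
        if (my_req_needs_retry(req)) {
                blk_mq_requeue_request(req, true);
                return;
        }
        blk_mq_end_request(req, my_req_status(req));
}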
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/nvme/host/core.c    19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d70df1d..48cafaa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -131,7 +131,7 @@ void nvme_complete_rq(struct request *req)
 {
         if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
                 nvme_req(req)->retries++;
-                blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
+                blk_mq_requeue_request(req, true);
                 return;
         }
@@ -2694,9 +2694,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
         /* Forcibly unquiesce queues to avoid blocking dispatch */
         blk_mq_unquiesce_queue(ctrl->admin_q);
-        /* Forcibly start all queues to avoid having stuck requests */
-        blk_mq_start_hw_queues(ctrl->admin_q);
-
         list_for_each_entry(ns, &ctrl->namespaces, list) {
                 /*
                  * Revalidating a dead namespace sets capacity to 0. This will
@@ -2709,16 +2706,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                 /* Forcibly unquiesce queues to avoid blocking dispatch */
                 blk_mq_unquiesce_queue(ns->queue);
-
-                /*
-                 * Forcibly start all queues to avoid having stuck requests.
-                 * Note that we must ensure the queues are not stopped
-                 * when the final removal happens.
-                 */
-                blk_mq_start_hw_queues(ns->queue);
-
-                /* draining requests in requeue list */
-                blk_mq_kick_requeue_list(ns->queue);
         }
         mutex_unlock(&ctrl->namespaces_mutex);
 }
@@ -2787,10 +2774,8 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
         struct nvme_ns *ns;
         mutex_lock(&ctrl->namespaces_mutex);
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
+        list_for_each_entry(ns, &ctrl->namespaces, list)
                 blk_mq_unquiesce_queue(ns->queue);
-                blk_mq_kick_requeue_list(ns->queue);
-        }
         mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);