author		Keith Busch <keith.busch@intel.com>	2013-12-10 13:10:37 -0700
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2014-01-27 19:20:02 -0500
commit		d4b4ff8e28b474fac0fbfa9cfc40f88b9e41e380 (patch)
tree		356576d531535d11ebcf321f92131e0e4513ee32 /drivers/block
parent		9a6b94584de1a0467d85b435df9c744c5c45a270 (diff)
NVMe: Schedule reset for failed controllers
Schedule a controller reset when the controller reports a failed status
(CSTS.CFS). If the device does not become ready after the reset, the PCI
device will be scheduled for removal.

Signed-off-by: Keith Busch <keith.busch@intel.com>
[fixed checkpatch issue]
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
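The check this patch adds is simple: on each pass of the polling kthread, if a device that has completed initialization reports a fatal status (CSTS.CFS set) and no reset work is already pending, the device is taken off the polling list and a reset is queued. The stand-alone C sketch below models only that decision logic; fake_dev, needs_reset and the sample values are illustrative and not part of the driver, with the CSTS bit positions taken from the NVMe specification.

/*
 * Minimal user-space sketch of the "failed controller" check added to
 * nvme_kthread() by this patch.  Hypothetical types and helpers; only
 * the register bit definitions follow the NVMe spec.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_CSTS_RDY	(1u << 0)	/* controller ready */
#define NVME_CSTS_CFS	(1u << 1)	/* controller fatal status */

struct fake_dev {
	uint32_t csts;		/* sampled CSTS register value */
	bool initialized;	/* set once probe/resume has completed */
	bool reset_pending;	/* stands in for work_busy(&dev->reset_work) */
};

/* Return true when a controller reset should be queued for this device. */
static bool needs_reset(const struct fake_dev *dev)
{
	if (!(dev->csts & NVME_CSTS_CFS) || !dev->initialized)
		return false;		/* healthy, or not brought up yet */
	return !dev->reset_pending;	/* avoid queueing a second reset */
}

int main(void)
{
	struct fake_dev healthy = { .csts = NVME_CSTS_RDY, .initialized = true };
	struct fake_dev failed  = { .csts = NVME_CSTS_CFS, .initialized = true };

	printf("healthy controller needs reset: %d\n", needs_reset(&healthy));
	printf("failed controller needs reset:  %d\n", needs_reset(&failed));
	return 0;
}

In the driver itself the same condition also removes the device from dev_list (list_del_init()) before queueing nvme_reset_failed_dev() on nvme_workq, so the kthread stops polling a controller that is being reset.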
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 000bca4..2f5b9f5 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -60,6 +60,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
 /*
  * An NVM Express queue. Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -1612,13 +1614,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 
 static int nvme_kthread(void *data)
 {
-	struct nvme_dev *dev;
+	struct nvme_dev *dev, *next;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
-		list_for_each_entry(dev, &dev_list, node) {
+		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+							dev->initialized) {
+				if (work_busy(&dev->reset_work))
+					continue;
+				list_del_init(&dev->node);
+				dev_warn(&dev->pci_dev->dev,
+					"Failed status, reset controller\n");
+				INIT_WORK(&dev->reset_work,
+							nvme_reset_failed_dev);
+				queue_work(nvme_workq, &dev->reset_work);
+				continue;
+			}
 			for (i = 0; i < dev->queue_count; i++) {
 				struct nvme_queue *nvmeq = dev->queues[i];
 				if (!nvmeq)
@@ -2006,6 +2020,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
+	dev->initialized = 0;
 	for (i = dev->queue_count - 1; i >= 0; i--)
 		nvme_disable_queue(dev, i);
 
@@ -2196,6 +2211,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
 	}
+	dev->initialized = 1;
 	return 0;
 }
 
@@ -2269,6 +2285,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto remove;
 
+	dev->initialized = 1;
 	kref_init(&dev->kref);
 	return 0;