@@ -99,6 +99,7 @@ struct nvme_dev {
 	dma_addr_t cmb_dma_addr;
 	u64 cmb_size;
 	u32 cmbsz;
+	u32 cmbloc;
 	struct nvme_ctrl ctrl;
 	struct completion ioq_wait;
 };
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
-		queue_work(nvme_workq, &dev->reset_work);
+		nvme_reset(dev);
 
 		/*
 		 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 	struct nvme_queue *nvmeq;
 
-	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
 						NVME_CAP_NSSRC(cap) : 0;
 
 	if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
 
 	/* Skip controllers under certain specific conditions. */
 	if (nvme_should_reset(dev, csts)) {
-		if (queue_work(nvme_workq, &dev->reset_work))
+		if (!nvme_reset(dev))
 			dev_warn(dev->dev,
 				"Failed status: 0x%x, reset controller.\n",
 				csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	return ret >= 0 ? 0 : ret;
 }
 
+static ssize_t nvme_cmb_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+	return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+		       ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
 	u64 szu, size, offset;
-	u32 cmbloc;
 	resource_size_t bar_size;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	void __iomem *cmb;
 	dma_addr_t dma_addr;
 
-	if (!use_cmb_sqes)
-		return NULL;
-
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!(NVME_CMB_SZ(dev->cmbsz)))
 		return NULL;
+	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-	cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+	if (!use_cmb_sqes)
+		return NULL;
 
 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
 	size = szu * NVME_CMB_SZ(dev->cmbsz);
-	offset = szu * NVME_CMB_OFST(cmbloc);
-	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+	offset = szu * NVME_CMB_OFST(dev->cmbloc);
+	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
 
 	if (offset > bar_size)
 		return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	if (size > bar_size - offset)
 		size = bar_size - offset;
 
-	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
 	cmb = ioremap_wc(dma_addr, size);
 	if (!cmb)
 		return NULL;
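
The CMB sizing math above is compact enough to check by hand. Below is a
minimal userspace sketch of the same arithmetic, using hypothetical
CMBSZ/CMBLOC register values; the bitfield layout mirrors the NVME_CMB_*
macros from include/linux/nvme.h (BIR in CMBLOC[2:0], OFST in CMBLOC[31:12],
SZU in CMBSZ[11:8], SZ in CMBSZ[31:12]):

/* cmb_math.c - userspace sketch of nvme_map_cmb()'s size arithmetic.
 * The register values below are hypothetical, for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)

int main(void)
{
	uint32_t cmbsz = 0x02000013;	/* hypothetical: SZ = 0x2000, SZU = 0 */
	uint32_t cmbloc = 0x00000002;	/* hypothetical: BIR = 2, OFST = 0 */

	/* Same math as the driver: the size unit is 4KB << (4 * SZU). */
	uint64_t szu = (uint64_t)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
	uint64_t size = szu * NVME_CMB_SZ(cmbsz);
	uint64_t offset = szu * NVME_CMB_OFST(cmbloc);

	printf("CMB: %llu bytes at offset %llu of BAR%u\n",
	       (unsigned long long)size, (unsigned long long)offset,
	       NVME_CMB_BIR(cmbloc));
	return 0;
}

With these sample values the controller reports a 32 MiB CMB at offset 0 of
BAR 2, which is the region nvme_map_cmb() would hand to ioremap_wc().
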
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 {
-	int pass, queues = dev->online_queues - 1;
+	int pass;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 			dev->q_depth);
 	}
 
-	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+	/*
+	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
+	 * populate sysfs if a CMB is implemented. Note that we add the
+	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
+	 * NULL as final argument to sysfs_add_file_to_group.
+	 */
+
+	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
 
+		if (dev->cmbsz) {
+			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+						    &dev_attr_cmb.attr, NULL))
+				dev_warn(dev->dev,
+					 "failed to add sysfs attribute for CMB\n");
+		}
+	}
+
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
 	return 0;
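
With the attribute registered on the controller's class device, the raw
CMBLOC/CMBSZ values can be read from userspace. A minimal sketch of a reader
follows; the nvme0 instance name (and hence the sysfs path) is an assumption
for illustration:

/* cmb_read.c - dump the controller's new "cmb" sysfs attribute.
 * Assumes the first controller is named nvme0. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/nvme/nvme0/cmb", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "cmbloc : x...." and "cmbsz  : x...." */
	fclose(f);
	return 0;
}

Equivalently, cat /sys/class/nvme/nvme0/cmb prints both registers directly.
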
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i;
+	int i, queues;
 	u32 csts = -1;
 
 	del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		csts = readl(dev->bar + NVME_REG_CSTS);
 	}
 
+	queues = dev->online_queues - 1;
 	for (i = dev->queue_count - 1; i > 0; i--)
 		nvme_suspend_queue(dev->queues[i]);
 
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		if (dev->queue_count)
 			nvme_suspend_queue(dev->queues[0]);
 	} else {
-		nvme_disable_io_queues(dev);
+		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
 	nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
 {
 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
 		return -ENODEV;
-
+	if (work_busy(&dev->reset_work))
+		return -ENODEV;
 	if (!queue_work(nvme_workq, &dev->reset_work))
 		return -EBUSY;
-
-	flush_work(&dev->reset_work);
 	return 0;
 }
 
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
-	return nvme_reset(to_nvme_dev(ctrl));
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
+	int ret = nvme_reset(dev);
+
+	if (!ret)
+		flush_work(&dev->reset_work);
+	return ret;
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_disable(dev, false);
 	else
-		queue_work(nvme_workq, &dev->reset_work);
+		nvme_reset(dev);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	queue_work(nvme_workq, &ndev->reset_work);
+	nvme_reset(ndev);
 	return 0;
 }
 #endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
 	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
-	queue_work(nvme_workq, &dev->reset_work);
+	nvme_reset(dev);
 	return PCI_ERS_RESULT_RECOVERED;
 }
 