@@ -12,6 +12,7 @@
  * more details.
  */

+#include <linux/aer.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
@@ -28,10 +29,10 @@
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/kernel.h>
-#include <linux/list_sort.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
 #include <linux/ptrace.h>
@@ -39,23 +40,24 @@
 #include <linux/slab.h>
 #include <linux/t10-pi.h>
 #include <linux/types.h>
-#include <linux/pr.h>
-#include <scsi/sg.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <asm/unaligned.h>

-#include <uapi/linux/nvme_ioctl.h>
 #include "nvme.h"

-#define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
 #define NVME_AQ_DEPTH		256
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
-#define ADMIN_TIMEOUT		(admin_timeout * HZ)
-#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_NR_AEN_COMMANDS	1
+#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
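
The two macros above carve the 256-entry admin queue into a blk-mq region and a driver-private region: tags 0 through NVME_AQ_BLKMQ_DEPTH - 1 belong to blk-mq requests, while command IDs at or above NVME_AQ_BLKMQ_DEPTH are reserved for AEN commands the driver issues on its own. A minimal sketch of the classification this enables (the helper name is illustrative and not part of the patch; the same test appears inline in __nvme_process_cq() further down):

/* Sketch: tell AEN completions apart from blk-mq request completions
 * on the admin queue, purely from the completion's command ID. */
static bool nvme_cqe_is_aen(u16 qid, u16 command_id)
{
	return qid == 0 && command_id >= NVME_AQ_BLKMQ_DEPTH;
}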

-static unsigned char admin_timeout = 60;
+unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");

@@ -63,16 +65,10 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");

-static unsigned char shutdown_timeout = 5;
+unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

-static int nvme_major;
-module_param(nvme_major, int, 0);
-
-static int nvme_char_major;
-module_param(nvme_char_major, int, 0);
-
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);

@@ -80,28 +76,60 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

-static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;

-static struct class *nvme_class;
+struct nvme_dev;
+struct nvme_queue;

-static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
-static void nvme_dead_ctrl(struct nvme_dev *dev);
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

-struct async_cmd_info {
-	struct kthread_work work;
-	struct kthread_worker *worker;
-	struct request *req;
-	u32 result;
-	int status;
-	void *ctx;
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+	struct list_head node;
+	struct nvme_queue **queues;
+	struct blk_mq_tag_set tagset;
+	struct blk_mq_tag_set admin_tagset;
+	u32 __iomem *dbs;
+	struct device *dev;
+	struct dma_pool *prp_page_pool;
+	struct dma_pool *prp_small_pool;
+	unsigned queue_count;
+	unsigned online_queues;
+	unsigned max_qid;
+	int q_depth;
+	u32 db_stride;
+	struct msix_entry *entry;
+	void __iomem *bar;
+	struct work_struct reset_work;
+	struct work_struct scan_work;
+	struct work_struct remove_work;
+	struct mutex shutdown_lock;
+	bool subsystem;
+	void __iomem *cmb;
+	dma_addr_t cmb_dma_addr;
+	u64 cmb_size;
+	u32 cmbsz;
+	unsigned long flags;
+
+#define NVME_CTRL_RESETTING	0
+
+	struct nvme_ctrl ctrl;
+	struct completion ioq_wait;
 };

+static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
+{
+	return container_of(ctrl, struct nvme_dev, ctrl);
+}
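
to_nvme_dev() is the standard container_of() embedding idiom that the ctrl/dev split relies on: transport-agnostic code passes around struct nvme_ctrl, and the PCIe driver recovers its surrounding struct nvme_dev because ctrl is embedded by value, so the conversion is pure pointer arithmetic with no allocation or lookup. A hedged usage sketch (the op and its name are illustrative, not taken from this patch):

/* Sketch: a PCIe-specific callback handed the generic controller
 * recovers the containing nvme_dev to touch PCI-only state. */
static u32 nvme_pci_read_csts(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	return readl(dev->bar + NVME_REG_CSTS);	/* BAR0 register read */
}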
+
 /*
  * An NVM Express queue.  Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -126,7 +154,24 @@ struct nvme_queue {
 	u16 qid;
 	u8 cq_phase;
 	u8 cqe_seen;
-	struct async_cmd_info cmdinfo;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_init_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+	struct nvme_queue *nvmeq;
+	int aborted;
+	int npages;		/* In the PRP list. 0 means small pool in use */
+	int nents;		/* Used in scatterlist */
+	int length;		/* Of data, in bytes */
+	dma_addr_t first_dma;
+	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+	struct scatterlist *sg;
+	struct scatterlist inline_sg[0];
 };

 /*
@@ -148,23 +193,11 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }

-typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
-						struct nvme_completion *);
-
-struct nvme_cmd_info {
-	nvme_completion_fn fn;
-	void *ctx;
-	int aborted;
-	struct nvme_queue *nvmeq;
-	struct nvme_iod iod[0];
-};
-
 /*
  * Max size of iod being embedded in the request payload
  */
 #define NVME_INT_PAGES		2
-#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->page_size)
-#define NVME_INT_MASK		0x01
+#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

 /*
  * Will slightly overestimate the number of pages needed.  This is OK
@@ -173,19 +206,22 @@ struct nvme_cmd_info {
  */
 static int nvme_npages(unsigned size, struct nvme_dev *dev)
 {
-	unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
+	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
+				      dev->ctrl.page_size);
 	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
 }

-static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
+		unsigned int size, unsigned int nseg)
 {
-	unsigned int ret = sizeof(struct nvme_cmd_info);
-
-	ret += sizeof(struct nvme_iod);
-	ret += sizeof(__le64 *) * nvme_npages(NVME_INT_BYTES(dev), dev);
-	ret += sizeof(struct scatterlist) * NVME_INT_PAGES;
+	return sizeof(__le64 *) * nvme_npages(size, dev) +
+			sizeof(struct scatterlist) * nseg;
+}

-	return ret;
+static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+{
+	return sizeof(struct nvme_iod) +
+		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
 }
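
For the common 4KiB controller page the inline reservation is small: NVME_INT_BYTES is 8KiB, nvme_npages() computes DIV_ROUND_UP(8192 + 4096, 4096) = 3 PRP entries, and three entries fit in a single page-sized list, so each request's PDU carries struct nvme_iod plus one PRP-list pointer plus two inline scatterlist entries. A sketch of that arithmetic (the 4096 values are an assumption for illustration, not hard-coded by the patch):

/* Sketch: inline PDU footprint assuming dev->ctrl.page_size == 4096. */
unsigned nprps = DIV_ROUND_UP(8192 + 4096, 4096);		/* = 3 */
unsigned lists = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);	/* = 1 */
size_t pdu_size = sizeof(struct nvme_iod) +
		  sizeof(__le64 *) * lists +			/* PRP-list slots */
		  sizeof(struct scatterlist) * NVME_INT_PAGES;	/* inline_sg[] */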

 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -215,11 +251,11 @@ static int nvme_admin_init_request(void *data, struct request *req,
 				unsigned int numa_node)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[0];

 	BUG_ON(!nvmeq);
-	cmd->nvmeq = nvmeq;
+	iod->nvmeq = nvmeq;
 	return 0;
 }

@@ -242,148 +278,36 @@ static int nvme_init_request(void *data, struct request *req,
 				unsigned int numa_node)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

 	BUG_ON(!nvmeq);
-	cmd->nvmeq = nvmeq;
+	iod->nvmeq = nvmeq;
 	return 0;
 }

-static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
-				nvme_completion_fn handler)
+static void nvme_complete_async_event(struct nvme_dev *dev,
+		struct nvme_completion *cqe)
 {
-	cmd->fn = handler;
-	cmd->ctx = ctx;
-	cmd->aborted = 0;
-	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
-}
-
-static void *iod_get_private(struct nvme_iod *iod)
-{
-	return (void *) (iod->private & ~0x1UL);
-}
-
-/*
- * If bit 0 is set, the iod is embedded in the request payload.
- */
-static bool iod_should_kfree(struct nvme_iod *iod)
-{
-	return (iod->private & NVME_INT_MASK) == 0;
-}
-
-/* Special values must be less than 0x1000 */
-#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
-#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
-
-static void special_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	if (ctx == CMD_CTX_CANCELLED)
-		return;
-	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(nvmeq->q_dmadev,
-				"completed id %d twice on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(nvmeq->q_dmadev,
-				"invalid id %d completed on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
-}
-
-static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
-{
-	void *ctx;
-
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_CANCELLED;
-	return ctx;
-}
-
-static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	u32 result = le32_to_cpup(&cqe->result);
-	u16 status = le16_to_cpup(&cqe->status) >> 1;
+	u16 status = le16_to_cpu(cqe->status) >> 1;
+	u32 result = le32_to_cpu(cqe->result);

 	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
-		++nvmeq->dev->event_limit;
+		++dev->ctrl.event_limit;
 	if (status != NVME_SC_SUCCESS)
 		return;

 	switch (result & 0xff07) {
 	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(nvmeq->q_dmadev, "rescanning\n");
-		schedule_work(&nvmeq->dev->scan_work);
+		dev_info(dev->dev, "rescanning\n");
+		queue_work(nvme_workq, &dev->scan_work);
 	default:
-		dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
-	}
-}
-
-static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct request *req = ctx;
-
-	u16 status = le16_to_cpup(&cqe->status) >> 1;
-	u32 result = le32_to_cpup(&cqe->result);
-
-	blk_mq_free_request(req);
-
-	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
-	++nvmeq->dev->abort_limit;
-}
-
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct async_cmd_info *cmdinfo = ctx;
-	cmdinfo->result = le32_to_cpup(&cqe->result);
-	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-	blk_mq_free_request(cmdinfo->req);
-}
-
-static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
-				  unsigned int tag)
-{
-	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag);
-
-	return blk_mq_rq_to_pdu(req);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held.  May not sleep.
- */
-static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
-						nvme_completion_fn *fn)
-{
-	struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
-	void *ctx;
-	if (tag >= nvmeq->q_depth) {
-		*fn = special_completion;
-		return CMD_CTX_INVALID;
+		dev_warn(dev->dev, "async event result %08x\n", result);
 	}
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_COMPLETED;
-	return ctx;
 }

 /**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
  * @cmd: The command to send
  *
@@ -405,69 +329,44 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
 	nvmeq->sq_tail = tail;
 }

-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	__nvme_submit_cmd(nvmeq, cmd);
-	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
-static __le64 **iod_list(struct nvme_iod *iod)
-{
-	return ((void *)iod) + iod->offset;
-}
-
-static inline void iod_init(struct nvme_iod *iod, unsigned nbytes,
-			    unsigned nseg, unsigned long private)
-{
-	iod->private = private;
-	iod->offset = offsetof(struct nvme_iod, sg[nseg]);
-	iod->npages = -1;
-	iod->length = nbytes;
-	iod->nents = 0;
-}
-
-static struct nvme_iod *
-__nvme_alloc_iod(unsigned nseg, unsigned bytes, struct nvme_dev *dev,
-		 unsigned long priv, gfp_t gfp)
+static __le64 **iod_list(struct request *req)
 {
-	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
-				sizeof(__le64 *) * nvme_npages(bytes, dev) +
-				sizeof(struct scatterlist) * nseg, gfp);
-
-	if (iod)
-		iod_init(iod, bytes, nseg, priv);
-
-	return iod;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	return (__le64 **)(iod->sg + req->nr_phys_segments);
 }

-static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
-			gfp_t gfp)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
-	unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
-                                                sizeof(struct nvme_dsm_range);
-	struct nvme_iod *iod;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
+	int nseg = rq->nr_phys_segments;
+	unsigned size;

-	if (rq->nr_phys_segments <= NVME_INT_PAGES &&
-	    size <= NVME_INT_BYTES(dev)) {
-		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
+	if (rq->cmd_flags & REQ_DISCARD)
+		size = sizeof(struct nvme_dsm_range);
+	else
+		size = blk_rq_bytes(rq);

-		iod = cmd->iod;
-		iod_init(iod, size, rq->nr_phys_segments,
-				(unsigned long) rq | NVME_INT_MASK);
-		return iod;
+	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
+		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
+		if (!iod->sg)
+			return BLK_MQ_RQ_QUEUE_BUSY;
+	} else {
+		iod->sg = iod->inline_sg;
 	}

-	return __nvme_alloc_iod(rq->nr_phys_segments, size, dev,
-				(unsigned long) rq, gfp);
+	iod->aborted = 0;
+	iod->npages = -1;
+	iod->nents = 0;
+	iod->length = size;
+	return 0;
 }

-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 {
-	const int last_prp = dev->page_size / 8 - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	const int last_prp = dev->ctrl.page_size / 8 - 1;
 	int i;
-	__le64 **list = iod_list(iod);
+	__le64 **list = iod_list(req);
 	dma_addr_t prp_dma = iod->first_dma;

 	if (iod->npages == 0)
@@ -479,20 +378,8 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 		prp_dma = next_prp_dma;
 	}

-	if (iod_should_kfree(iod))
-		kfree(iod);
-}
-
-static int nvme_error_status(u16 status)
-{
-	switch (status & 0x7ff) {
-	case NVME_SC_SUCCESS:
-		return 0;
-	case NVME_SC_CAP_EXCEEDED:
-		return -ENOSPC;
-	default:
-		return -EIO;
-	}
+	if (iod->sg != iod->inline_sg)
+		kfree(iod->sg);
 }

 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -549,27 +436,6 @@ static void nvme_dif_remap(struct request *req,
 	}
 	kunmap_atomic(pmap);
 }
-
-static void nvme_init_integrity(struct nvme_ns *ns)
-{
-	struct blk_integrity integrity;
-
-	switch (ns->pi_type) {
-	case NVME_NS_DPS_PI_TYPE3:
-		integrity.profile = &t10_pi_type3_crc;
-		break;
-	case NVME_NS_DPS_PI_TYPE1:
-	case NVME_NS_DPS_PI_TYPE2:
-		integrity.profile = &t10_pi_type1_crc;
-		break;
-	default:
-		integrity.profile = NULL;
-		break;
-	}
-	integrity.tuple_size = ns->ms;
-	blk_integrity_register(ns->disk, &integrity);
-	blk_queue_max_integrity_segments(ns->queue, 1);
-}
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 static void nvme_dif_remap(struct request *req,
 			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
@@ -581,91 +447,27 @@ static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 {
 }
-static void nvme_init_integrity(struct nvme_ns *ns)
-{
-}
 #endif

-static void req_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct nvme_iod *iod = ctx;
-	struct request *req = iod_get_private(iod);
-	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-	u16 status = le16_to_cpup(&cqe->status) >> 1;
-	bool requeue = false;
-	int error = 0;
-
-	if (unlikely(status)) {
-		if (!(status & NVME_SC_DNR || blk_noretry_request(req))
-		    && (jiffies - req->start_time) < req->timeout) {
-			unsigned long flags;
-
-			requeue = true;
-			blk_mq_requeue_request(req);
-			spin_lock_irqsave(req->q->queue_lock, flags);
-			if (!blk_queue_stopped(req->q))
-				blk_mq_kick_requeue_list(req->q);
-			spin_unlock_irqrestore(req->q->queue_lock, flags);
-			goto release_iod;
-		}
-
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-				error = -EINTR;
-			else
-				error = status;
-		} else {
-			error = nvme_error_status(status);
-		}
-	}
-
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-		u32 result = le32_to_cpup(&cqe->result);
-		req->special = (void *)(uintptr_t)result;
-	}
-
-	if (cmd_rq->aborted)
-		dev_warn(nvmeq->dev->dev,
-			"completing aborted command with status:%04x\n",
-			error);
-
-release_iod:
-	if (iod->nents) {
-		dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
-			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		if (blk_integrity_rq(req)) {
-			if (!rq_data_dir(req))
-				nvme_dif_remap(req, nvme_dif_complete);
-			dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
-				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		}
-	}
-	nvme_free_iod(nvmeq->dev, iod);
-
-	if (likely(!requeue))
-		blk_mq_complete_request(req, error);
-}
-
-/* length is in bytes.  gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
-		int total_len, gfp_t gfp)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
+		int total_len)
 {
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
 	int length = total_len;
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
-	u32 page_size = dev->page_size;
+	u32 page_size = dev->ctrl.page_size;
 	int offset = dma_addr & (page_size - 1);
 	__le64 *prp_list;
-	__le64 **list = iod_list(iod);
+	__le64 **list = iod_list(req);
 	dma_addr_t prp_dma;
 	int nprps, i;

 	length -= (page_size - offset);
 	if (length <= 0)
-		return total_len;
+		return true;

 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -678,7 +480,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,

 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return total_len;
+		return true;
 	}

 	nprps = DIV_ROUND_UP(length, page_size);
@@ -690,11 +492,11 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 		iod->npages = 1;
 	}

-	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return (total_len - length) + page_size;
+		return false;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -702,9 +504,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 	for (;;) {
 		if (i == page_size >> 3) {
 			__le64 *old_prp_list = prp_list;
-			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return total_len - length;
+				return false;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -724,115 +526,105 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 		dma_len = sg_dma_len(sg);
 	}

-	return total_len;
+	return true;
 }
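
nvme_setup_prps() now reports plain success or failure instead of the number of bytes it managed to map, since the caller no longer retries partial mappings. The PRP layout it builds is unchanged: PRP1 points into the first (possibly unaligned) page, and everything past that either fits directly in PRP2 or goes through a chained PRP list. A small worked example of the sizing decision (the concrete numbers are assumptions for illustration):

/* Sketch: a 64KiB transfer whose first segment starts 512 bytes
 * into a 4KiB controller page. */
u32 page_size = 4096;
int offset = 512;
int length = 65536 - (page_size - offset);	/* bytes left after PRP1 */
int nprps = DIV_ROUND_UP(length, page_size);	/* = 16 > 1, so PRP2 must
						 * point to a PRP list */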

-static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
-		struct nvme_iod *iod)
+static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+		struct nvme_command *cmnd)
 {
-	struct nvme_command cmnd;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct request_queue *q = req->q;
+	enum dma_data_direction dma_dir = rq_data_dir(req) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	int ret = BLK_MQ_RQ_QUEUE_ERROR;

-	memcpy(&cmnd, req->cmd, sizeof(cmnd));
-	cmnd.rw.command_id = req->tag;
-	if (req->nr_phys_segments) {
-		cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
-	}
+	sg_init_table(iod->sg, req->nr_phys_segments);
+	iod->nents = blk_rq_map_sg(q, req, iod->sg);
+	if (!iod->nents)
+		goto out;

-	__nvme_submit_cmd(nvmeq, &cmnd);
-}
+	ret = BLK_MQ_RQ_QUEUE_BUSY;
+	if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+		goto out;

-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
-static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-		struct request *req, struct nvme_iod *iod)
-{
-	struct nvme_dsm_range *range =
-				(struct nvme_dsm_range *)iod_list(iod)[0];
-	struct nvme_command cmnd;
+	if (!nvme_setup_prps(dev, req, blk_rq_bytes(req)))
+		goto out_unmap;

-	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	ret = BLK_MQ_RQ_QUEUE_ERROR;
+	if (blk_integrity_rq(req)) {
+		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
+			goto out_unmap;

-	memset(&cmnd, 0, sizeof(cmnd));
-	cmnd.dsm.opcode = nvme_cmd_dsm;
-	cmnd.dsm.command_id = req->tag;
-	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
-	cmnd.dsm.nr = 0;
-	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+		sg_init_table(&iod->meta_sg, 1);
+		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
+			goto out_unmap;

-	__nvme_submit_cmd(nvmeq, &cmnd);
-}
+		if (rq_data_dir(req))
+			nvme_dif_remap(req, nvme_dif_prep);

-static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-								int cmdid)
-{
-	struct nvme_command cmnd;
+		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
+			goto out_unmap;
+	}

-	memset(&cmnd, 0, sizeof(cmnd));
-	cmnd.common.opcode = nvme_cmd_flush;
-	cmnd.common.command_id = cmdid;
-	cmnd.common.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+	if (blk_integrity_rq(req))
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+	return BLK_MQ_RQ_QUEUE_OK;

-	__nvme_submit_cmd(nvmeq, &cmnd);
+out_unmap:
+	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+out:
+	return ret;
 }

-static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
-							struct nvme_ns *ns)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
-	struct request *req = iod_get_private(iod);
-	struct nvme_command cmnd;
-	u16 control = 0;
-	u32 dsmgmt = 0;
-
-	if (req->cmd_flags & REQ_FUA)
-		control |= NVME_RW_FUA;
-	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
-		control |= NVME_RW_LR;
-
-	if (req->cmd_flags & REQ_RAHEAD)
-		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
-	memset(&cmnd, 0, sizeof(cmnd));
-	cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd.rw.command_id = req->tag;
-	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
-	cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
-	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-	if (ns->ms) {
-		switch (ns->pi_type) {
-		case NVME_NS_DPS_PI_TYPE3:
-			control |= NVME_RW_PRINFO_PRCHK_GUARD;
-			break;
-		case NVME_NS_DPS_PI_TYPE1:
-		case NVME_NS_DPS_PI_TYPE2:
-			control |= NVME_RW_PRINFO_PRCHK_GUARD |
-					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd.rw.reftag = cpu_to_le32(
-					nvme_block_nr(ns, blk_rq_pos(req)));
-			break;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	enum dma_data_direction dma_dir = rq_data_dir(req) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (iod->nents) {
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 		}
-		if (blk_integrity_rq(req))
-			cmnd.rw.metadata =
-				cpu_to_le64(sg_dma_address(iod->meta_sg));
-		else
-			control |= NVME_RW_PRINFO_PRACT;
 	}

-	cmnd.rw.control = cpu_to_le16(control);
-	cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
+	nvme_free_iod(dev, req);
+}

-	__nvme_submit_cmd(nvmeq, &cmnd);
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_dsm_range *range;

-	return 0;
+	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+						&iod->first_dma);
+	if (!range)
+		return BLK_MQ_RQ_QUEUE_BUSY;
+	iod_list(req)[0] = (__le64 *)range;
+	iod->npages = 0;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+	return BLK_MQ_RQ_QUEUE_OK;
 }
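
The setup helpers share one return-code convention: BLK_MQ_RQ_QUEUE_BUSY for transient allocation failure (blk-mq will re-dispatch the request later) and BLK_MQ_RQ_QUEUE_ERROR for hard failures. A condensed sketch of how a caller such as nvme_queue_rq() (next hunk) consumes that convention; the switch is purely illustrative, the real code just propagates ret:

/* Sketch: what each setup-path return value means to the caller. */
switch (ret) {
case BLK_MQ_RQ_QUEUE_OK:	/* command fully built; submit it */
	break;
case BLK_MQ_RQ_QUEUE_BUSY:	/* out of memory right now; retried later */
case BLK_MQ_RQ_QUEUE_ERROR:	/* unmappable request; fails outright */
	nvme_free_iod(dev, req);	/* undo nvme_init_iod() */
	break;
}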

 /*
@@ -845,9 +637,8 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_queue *nvmeq = hctx->driver_data;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
-	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
-	struct nvme_iod *iod;
-	enum dma_data_direction dma_dir;
+	struct nvme_command cmnd;
+	int ret = BLK_MQ_RQ_QUEUE_OK;

 	/*
 	 * If formated with metadata, require the block layer provide a buffer
@@ -857,91 +648,72 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8) &&
 					req->cmd_type != REQ_TYPE_DRV_PRIV) {
-			blk_mq_complete_request(req, -EFAULT);
+			blk_mq_end_request(req, -EFAULT);
 			return BLK_MQ_RQ_QUEUE_OK;
 		}
 	}

-	iod = nvme_alloc_iod(req, dev, GFP_ATOMIC);
-	if (!iod)
-		return BLK_MQ_RQ_QUEUE_BUSY;
+	ret = nvme_init_iod(req, dev);
+	if (ret)
+		return ret;

 	if (req->cmd_flags & REQ_DISCARD) {
-		void *range;
-		/*
-		 * We reuse the small pool to allocate the 16-byte range here
-		 * as it is not worth having a special pool for these or
-		 * additional cases to handle freeing the iod.
-		 */
-		range = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC,
-						&iod->first_dma);
-		if (!range)
-			goto retry_cmd;
-		iod_list(iod)[0] = (__le64 *)range;
-		iod->npages = 0;
-	} else if (req->nr_phys_segments) {
-		dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+		ret = nvme_setup_discard(nvmeq, ns, req, &cmnd);
+	} else {
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+			memcpy(&cmnd, req->cmd, sizeof(cmnd));
+		else if (req->cmd_flags & REQ_FLUSH)
+			nvme_setup_flush(ns, &cmnd);
+		else
+			nvme_setup_rw(ns, req, &cmnd);
+
+		if (req->nr_phys_segments)
+			ret = nvme_map_data(dev, req, &cmnd);
+	}

-		sg_init_table(iod->sg, req->nr_phys_segments);
-		iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-		if (!iod->nents)
-			goto error_cmd;
+	if (ret)
+		goto out;

-		if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
-			goto retry_cmd;
+	cmnd.common.command_id = req->tag;
+	blk_mq_start_request(req);

-		if (blk_rq_bytes(req) !=
-                    nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
-			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-			goto retry_cmd;
-		}
-		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
-				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-						dma_dir);
-				goto error_cmd;
-			}
+	spin_lock_irq(&nvmeq->q_lock);
+	__nvme_submit_cmd(nvmeq, &cmnd);
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+	return BLK_MQ_RQ_QUEUE_OK;
+out:
+	nvme_free_iod(dev, req);
+	return ret;
+}

-		sg_init_table(iod->meta_sg, 1);
-		if (blk_rq_map_integrity_sg(
-				req->q, req->bio, iod->meta_sg) != 1) {
-			dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-					dma_dir);
-			goto error_cmd;
-		}
+static void nvme_complete_rq(struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_dev *dev = iod->nvmeq->dev;
+	int error = 0;

-		if (rq_data_dir(req))
-			nvme_dif_remap(req, nvme_dif_prep);
+	nvme_unmap_data(dev, req);

-		if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
-			dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-					dma_dir);
-			goto error_cmd;
-		}
+	if (unlikely(req->errors)) {
+		if (nvme_req_needs_retry(req, req->errors)) {
+			nvme_requeue_req(req);
+			return;
 		}
-	}

-	nvme_set_info(cmd, iod, req_completion);
-	spin_lock_irq(&nvmeq->q_lock);
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		nvme_submit_priv(nvmeq, req, iod);
-	else if (req->cmd_flags & REQ_DISCARD)
-		nvme_submit_discard(nvmeq, ns, req, iod);
-	else if (req->cmd_flags & REQ_FLUSH)
-		nvme_submit_flush(nvmeq, ns, req->tag);
-	else
-		nvme_submit_iod(nvmeq, iod, ns);
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+			error = req->errors;
+		else
+			error = nvme_error_status(req->errors);
+	}

-	nvme_process_cq(nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-	return BLK_MQ_RQ_QUEUE_OK;
+	if (unlikely(iod->aborted)) {
+		dev_warn(dev->dev,
+			"completing aborted command with status: %04x\n",
+			req->errors);
+	}

- error_cmd:
-	nvme_free_iod(dev, iod);
-	return BLK_MQ_RQ_QUEUE_ERROR;
- retry_cmd:
-	nvme_free_iod(dev, iod);
-	return BLK_MQ_RQ_QUEUE_BUSY;
+	blk_mq_end_request(req, error);
 }
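
nvme_complete_rq() is where retry and error translation now live: the IRQ path below only records the NVMe status via blk_mq_complete_request(), and the block layer invokes this .complete handler in softirq context, where unmapping and requeueing are cheaper to do. The retry predicate it calls comes from the shared nvme.h in this series; at the time it read roughly as follows (quoted from memory, so treat it as a sketch rather than the authoritative definition):

/* Sketch of nvme_req_needs_retry() (lives in nvme.h, not this file):
 * retry unless the controller said Do Not Retry, the request opted
 * out of retries, or its time budget is already spent. */
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}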

 static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
@@ -952,20 +724,47 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 	phase = nvmeq->cq_phase;

 	for (;;) {
-		void *ctx;
-		nvme_completion_fn fn;
 		struct nvme_completion cqe = nvmeq->cqes[head];
-		if ((le16_to_cpu(cqe.status) & 1) != phase)
+		u16 status = le16_to_cpu(cqe.status);
+		struct request *req;
+
+		if ((status & 1) != phase)
 			break;
 		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
 		if (++head == nvmeq->q_depth) {
 			head = 0;
 			phase = !phase;
 		}
+
 		if (tag && *tag == cqe.command_id)
 			*tag = -1;
-		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq, ctx, &cqe);
+
+		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
+			dev_warn(nvmeq->q_dmadev,
+				"invalid id %d completed on queue %d\n",
+				cqe.command_id, le16_to_cpu(cqe.sq_id));
+			continue;
+		}
+
+		/*
+		 * AEN requests are special as they don't time out and can
+		 * survive any kind of queue freeze and often don't respond to
+		 * aborts.  We don't even bother to allocate a struct request
+		 * for them but rather special case them here.
+		 */
+		if (unlikely(nvmeq->qid == 0 &&
+				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+			nvme_complete_async_event(nvmeq->dev, &cqe);
+			continue;
+		}
+
+		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
+			u32 result = le32_to_cpu(cqe.result);
+			req->special = (void *)(uintptr_t)result;
+		}
+		blk_mq_complete_request(req, status >> 1);
+
 	}

 	/* If the controller ignores the cq head doorbell and continuously
@@ -1028,127 +827,30 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return 0;
 }

-/*
- * Returns 0 on success.  If the result is negative, it's a Linux error code;
- * if the result is positive, it's an NVM Express status code
- */
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, void __user *ubuffer, unsigned bufflen,
-		u32 *result, unsigned timeout)
+static void nvme_submit_async_event(struct nvme_dev *dev)
 {
-	bool write = cmd->common.opcode & 1;
-	struct bio *bio = NULL;
-	struct request *req;
-	int ret;
-
-	req = blk_mq_alloc_request(q, write, 0);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	struct nvme_command c;

-	req->cmd_type = REQ_TYPE_DRV_PRIV;
-	req->cmd_flags |= REQ_FAILFAST_DRIVER;
-	req->__data_len = 0;
-	req->__sector = (sector_t) -1;
-	req->bio = req->biotail = NULL;
-
-	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-
-	req->cmd = (unsigned char *)cmd;
-	req->cmd_len = sizeof(struct nvme_command);
-	req->special = (void *)0;
-
-	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen,
-				      __GFP_DIRECT_RECLAIM);
-		if (ret)
-			goto out;
-	} else if (ubuffer && bufflen) {
-		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
-				      __GFP_DIRECT_RECLAIM);
-		if (ret)
-			goto out;
-		bio = req->bio;
-	}
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_admin_async_event;
+	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;

-	blk_execute_rq(req->q, NULL, req, 0);
-	if (bio)
-		blk_rq_unmap_user(bio);
-	if (result)
-		*result = (u32)(uintptr_t)req->special;
-	ret = req->errors;
- out:
-	blk_mq_free_request(req);
-	return ret;
+	__nvme_submit_cmd(dev->queues[0], &c);
 }

-int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, unsigned bufflen)
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
-	return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.delete_queue.opcode = opcode;
+	c.delete_queue.qid = cpu_to_le16(id);
+
+	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }

-static int nvme_submit_async_admin_req(struct nvme_dev *dev)
-{
-	struct nvme_queue *nvmeq = dev->queues[0];
-	struct nvme_command c;
-	struct nvme_cmd_info *cmd_info;
-	struct request *req;
-
-	req = blk_mq_alloc_request(dev->admin_q, WRITE,
-			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-
-	req->cmd_flags |= REQ_NO_TIMEOUT;
-	cmd_info = blk_mq_rq_to_pdu(req);
-	nvme_set_info(cmd_info, NULL, async_req_completion);
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = req->tag;
-
-	blk_mq_free_request(req);
-	__nvme_submit_cmd(nvmeq, &c);
-	return 0;
-}
-
-static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
-			struct nvme_command *cmd,
-			struct async_cmd_info *cmdinfo, unsigned timeout)
-{
-	struct nvme_queue *nvmeq = dev->queues[0];
-	struct request *req;
-	struct nvme_cmd_info *cmd_rq;
-
-	req = blk_mq_alloc_request(dev->admin_q, WRITE, 0);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-
-	req->timeout = timeout;
-	cmd_rq = blk_mq_rq_to_pdu(req);
-	cmdinfo->req = req;
-	nvme_set_info(cmd_rq, cmdinfo, async_completion);
-	cmdinfo->status = -EINTR;
-
-	cmd->common.command_id = req->tag;
-
-	nvme_submit_cmd(nvmeq, cmd);
-	return 0;
-}
-
-static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
-{
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
-	c.delete_queue.opcode = opcode;
-	c.delete_queue.qid = cpu_to_le16(id);
-
-	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
-}
-
-static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
-						struct nvme_queue *nvmeq)
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+		struct nvme_queue *nvmeq)
 {
 	struct nvme_command c;
 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -1165,7 +867,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 	c.create_cq.cq_flags = cpu_to_le16(flags);
 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

-	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }

 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1186,7 +888,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 	c.create_sq.sq_flags = cpu_to_le16(flags);
 	c.create_sq.cqid = cpu_to_le16(qid);

-	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }

 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1199,195 +901,111 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }

-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
+static void abort_endio(struct request *req, int error)
 {
-	struct nvme_command c = { };
-	int error;
-
-	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
-	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = cpu_to_le32(1);
-
-	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
-	if (!*id)
-		return -ENOMEM;
-
-	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
-			sizeof(struct nvme_id_ctrl));
-	if (error)
-		kfree(*id);
-	return error;
-}
-
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
-		struct nvme_id_ns **id)
-{
-	struct nvme_command c = { };
-	int error;
-
-	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
-	c.identify.opcode = nvme_admin_identify,
-	c.identify.nsid = cpu_to_le32(nsid),
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = iod->nvmeq;
+	u32 result = (u32)(uintptr_t)req->special;
+	u16 status = req->errors;

-	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
-	if (!*id)
-		return -ENOMEM;
-
-	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
-			sizeof(struct nvme_id_ns));
-	if (error)
-		kfree(*id);
-	return error;
-}
-
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
-					dma_addr_t dma_addr, u32 *result)
-{
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
-	c.features.opcode = nvme_admin_get_features;
-	c.features.nsid = cpu_to_le32(nsid);
-	c.features.prp1 = cpu_to_le64(dma_addr);
-	c.features.fid = cpu_to_le32(fid);
-
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
-			result, 0);
-}
-
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
-					dma_addr_t dma_addr, u32 *result)
-{
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
-	c.features.opcode = nvme_admin_set_features;
-	c.features.prp1 = cpu_to_le64(dma_addr);
-	c.features.fid = cpu_to_le32(fid);
-	c.features.dword11 = cpu_to_le32(dword11);
-
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
-			result, 0);
-}
-
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log)
-{
-	struct nvme_command c = { };
-	int error;
-
-	c.common.opcode = nvme_admin_get_log_page,
-	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
-	c.common.cdw10[0] = cpu_to_le32(
-			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
-			NVME_LOG_SMART),
-
-	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
-	if (!*log)
-		return -ENOMEM;
+	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+	atomic_inc(&nvmeq->dev->ctrl.abort_limit);

-	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
-			sizeof(struct nvme_smart_log));
-	if (error)
-		kfree(*log);
-	return error;
+	blk_mq_free_request(req);
 }

-/**
- * nvme_abort_req - Attempt aborting a request
- *
- * Schedule controller reset if the command was already aborted once before and
- * still hasn't been returned to the driver, or if this is the admin queue.
- */
-static void nvme_abort_req(struct request *req)
+static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
-	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = iod->nvmeq;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
-	struct nvme_cmd_info *abort_cmd;
 	struct nvme_command cmd;

-	if (!nvmeq->qid || cmd_rq->aborted) {
-		spin_lock(&dev_list_lock);
-		if (!__nvme_reset(dev)) {
-			dev_warn(dev->dev,
-				 "I/O %d QID %d timeout, reset controller\n",
-				 req->tag, nvmeq->qid);
-		}
-		spin_unlock(&dev_list_lock);
-		return;
+	/*
+	 * Shutdown immediately if controller times out while starting. The
+	 * reset work will see the pci device disabled when it gets the forced
+	 * cancellation error. All outstanding requests are completed on
+	 * shutdown, so we return BLK_EH_HANDLED.
+	 */
+	if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
+		dev_warn(dev->dev,
+			 "I/O %d QID %d timeout, disable controller\n",
+			 req->tag, nvmeq->qid);
+		nvme_dev_disable(dev, false);
+		req->errors = NVME_SC_CANCELLED;
+		return BLK_EH_HANDLED;
 	}

-	if (!dev->abort_limit)
-		return;
+	/*
+	 * Shutdown the controller immediately and schedule a reset if the
+	 * command was already aborted once before and still hasn't been
+	 * returned to the driver, or if this is the admin queue.
+	 */
+	if (!nvmeq->qid || iod->aborted) {
+		dev_warn(dev->dev,
+			 "I/O %d QID %d timeout, reset controller\n",
+			 req->tag, nvmeq->qid);
+		nvme_dev_disable(dev, false);
+		queue_work(nvme_workq, &dev->reset_work);

-	abort_req = blk_mq_alloc_request(dev->admin_q, WRITE,
-			BLK_MQ_REQ_NOWAIT);
-	if (IS_ERR(abort_req))
-		return;
+		/*
+		 * Mark the request as handled, since the inline shutdown
+		 * forces all outstanding requests to complete.
+		 */
+		req->errors = NVME_SC_CANCELLED;
+		return BLK_EH_HANDLED;
+	}

-	abort_cmd = blk_mq_rq_to_pdu(abort_req);
-	nvme_set_info(abort_cmd, abort_req, abort_completion);
+	iod->aborted = 1;
+
+	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
+		atomic_inc(&dev->ctrl.abort_limit);
+		return BLK_EH_RESET_TIMER;
+	}

 	memset(&cmd, 0, sizeof(cmd));
 	cmd.abort.opcode = nvme_admin_abort_cmd;
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-	cmd.abort.command_id = abort_req->tag;

-	--dev->abort_limit;
-	cmd_rq->aborted = 1;
+	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
+		 req->tag, nvmeq->qid);
+
+	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+			BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(abort_req)) {
+		atomic_inc(&dev->ctrl.abort_limit);
+		return BLK_EH_RESET_TIMER;
+	}
+
+	abort_req->timeout = ADMIN_TIMEOUT;
+	abort_req->end_io_data = NULL;
+	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

-	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
-							nvmeq->qid);
-	nvme_submit_cmd(dev->queues[0], &cmd);
+	/*
+	 * The aborted req will be completed on receiving the abort req.
+	 * We enable the timer again. If hit twice, it'll cause a device reset,
+	 * as the device then is in a faulty state.
+	 */
+	return BLK_EH_RESET_TIMER;
 }
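
The timeout handler's contract with blk-mq drives all of the returns above: BLK_EH_HANDLED tells the block layer the driver has completed (or will directly complete) the request itself, while BLK_EH_RESET_TIMER re-arms the timer and leaves the request outstanding, pending the Abort command's outcome. A minimal sketch of that contract (illustrative only; the function and parameter are hypothetical):

/* Sketch: the two outcomes a blk-mq timeout handler can report. */
static enum blk_eh_timer_return example_timeout(struct request *req,
		bool driver_will_complete)
{
	if (driver_will_complete) {
		req->errors = NVME_SC_CANCELLED;  /* surfaced via ->complete */
		return BLK_EH_HANDLED;
	}
	return BLK_EH_RESET_TIMER;		  /* wait for the Abort CQE */
}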
|
|
|
|
|
|
static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
|
|
|
{
|
|
|
struct nvme_queue *nvmeq = data;
|
|
|
- void *ctx;
|
|
|
- nvme_completion_fn fn;
|
|
|
- struct nvme_cmd_info *cmd;
|
|
|
- struct nvme_completion cqe;
|
|
|
+ int status;
|
|
|
|
|
|
if (!blk_mq_request_started(req))
|
|
|
return;
|
|
|
|
|
|
- cmd = blk_mq_rq_to_pdu(req);
|
|
|
-
|
|
|
- if (cmd->ctx == CMD_CTX_CANCELLED)
|
|
|
- return;
|
|
|
+ dev_warn(nvmeq->q_dmadev,
|
|
|
+ "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
|
|
|
|
|
|
+ status = NVME_SC_ABORT_REQ;
|
|
|
if (blk_queue_dying(req->q))
|
|
|
- cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
|
|
|
- else
|
|
|
- cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
|
|
|
-
|
|
|
-
|
|
|
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
|
|
|
- req->tag, nvmeq->qid);
|
|
|
- ctx = cancel_cmd_info(cmd, &fn);
|
|
|
- fn(nvmeq, ctx, &cqe);
|
|
|
-}
|
|
|
-
|
|
|
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
|
|
-{
|
|
|
- struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
|
|
|
- struct nvme_queue *nvmeq = cmd->nvmeq;
|
|
|
-
|
|
|
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
|
|
|
- nvmeq->qid);
|
|
|
- spin_lock_irq(&nvmeq->q_lock);
|
|
|
- nvme_abort_req(req);
|
|
|
- spin_unlock_irq(&nvmeq->q_lock);
|
|
|
-
|
|
|
- /*
|
|
|
- * The aborted req will be completed on receiving the abort req.
|
|
|
- * We enable the timer again. If hit twice, it'll cause a device reset,
|
|
|
- * as the device then is in a faulty state.
|
|
|
- */
|
|
|
- return BLK_EH_RESET_TIMER;
|
|
|
+ status |= NVME_SC_DNR;
|
|
|
+ blk_mq_complete_request(req, status);
|
|
|
}
|
|
|
|
|
|
static void nvme_free_queue(struct nvme_queue *nvmeq)
|
|
@@ -1430,8 +1048,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
|
|
|
nvmeq->cq_vector = -1;
|
|
|
spin_unlock_irq(&nvmeq->q_lock);
|
|
|
|
|
|
- if (!nvmeq->qid && nvmeq->dev->admin_q)
|
|
|
- blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
|
|
|
+ if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
|
|
|
+ blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
|
|
|
|
|
|
irq_set_affinity_hint(vector, NULL);
|
|
|
free_irq(vector, nvmeq);
|
|
@@ -1447,21 +1065,20 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
|
|
|
spin_unlock_irq(&nvmeq->q_lock);
|
|
|
}
|
|
|
|
|
|
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
|
|
|
+static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
|
|
|
{
|
|
|
- struct nvme_queue *nvmeq = dev->queues[qid];
|
|
|
+ struct nvme_queue *nvmeq = dev->queues[0];
|
|
|
|
|
|
if (!nvmeq)
|
|
|
return;
|
|
|
if (nvme_suspend_queue(nvmeq))
|
|
|
return;
|
|
|
|
|
|
- /* Don't tell the adapter to delete the admin queue.
|
|
|
- * Don't tell a removed adapter to delete IO queues. */
|
|
|
- if (qid && readl(&dev->bar->csts) != -1) {
|
|
|
- adapter_delete_sq(dev, qid);
|
|
|
- adapter_delete_cq(dev, qid);
|
|
|
- }
|
|
|
+ if (shutdown)
|
|
|
+ nvme_shutdown_ctrl(&dev->ctrl);
|
|
|
+ else
|
|
|
+ nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
|
|
|
+ dev->bar + NVME_REG_CAP));
|
|
|
|
|
|
spin_lock_irq(&nvmeq->q_lock);
|
|
|
nvme_process_cq(nvmeq);
|
|
@@ -1472,11 +1089,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
|
|
|
int entry_size)
|
|
|
{
|
|
|
int q_depth = dev->q_depth;
|
|
|
- unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
|
|
|
+ unsigned q_size_aligned = roundup(q_depth * entry_size,
|
|
|
+ dev->ctrl.page_size);
|
|
|
|
|
|
if (q_size_aligned * nr_io_queues > dev->cmb_size) {
|
|
|
u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
|
|
|
- mem_per_q = round_down(mem_per_q, dev->page_size);
|
|
|
+ mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
|
|
|
q_depth = div_u64(mem_per_q, entry_size);
|
|
|
|
|
|
/*
|
|
@@ -1495,8 +1113,8 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
|
|
|
int qid, int depth)
|
|
|
{
|
|
|
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
|
|
|
- unsigned offset = (qid - 1) *
|
|
|
- roundup(SQ_SIZE(depth), dev->page_size);
|
|
|
+ unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
|
|
|
+ dev->ctrl.page_size);
|
|
|
nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
|
|
|
nvmeq->sq_cmds_io = dev->cmb + offset;
|
|
|
} else {
|
|
@@ -1527,7 +1145,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
|
|
|
nvmeq->q_dmadev = dev->dev;
|
|
|
nvmeq->dev = dev;
|
|
|
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
|
|
|
- dev->instance, qid);
|
|
|
+ dev->ctrl.instance, qid);
|
|
|
spin_lock_init(&nvmeq->q_lock);
|
|
|
nvmeq->cq_head = 0;
|
|
|
nvmeq->cq_phase = 1;
@@ -1604,79 +1222,9 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
return result;
}

-static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
-{
- unsigned long timeout;
- u32 bit = enabled ? NVME_CSTS_RDY : 0;
-
- timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
-
- while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
- msleep(100);
- if (fatal_signal_pending(current))
- return -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(dev->dev,
- "Device not ready; aborting %s\n", enabled ?
- "initialisation" : "reset");
- return -ENODEV;
- }
- }
-
- return 0;
-}
-
-/*
- * If the device has been passed off to us in an enabled state, just clear
- * the enabled bit. The spec says we should set the 'shutdown notification
- * bits', but doing so may cause the device to complete commands to the
- * admin queue ... and we don't know what memory that might be pointing at!
- */
-static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
-{
- dev->ctrl_config &= ~NVME_CC_SHN_MASK;
- dev->ctrl_config &= ~NVME_CC_ENABLE;
- writel(dev->ctrl_config, &dev->bar->cc);
-
- return nvme_wait_ready(dev, cap, false);
-}
-
-static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
-{
- dev->ctrl_config &= ~NVME_CC_SHN_MASK;
- dev->ctrl_config |= NVME_CC_ENABLE;
- writel(dev->ctrl_config, &dev->bar->cc);
-
- return nvme_wait_ready(dev, cap, true);
-}
-
-static int nvme_shutdown_ctrl(struct nvme_dev *dev)
-{
- unsigned long timeout;
-
- dev->ctrl_config &= ~NVME_CC_SHN_MASK;
- dev->ctrl_config |= NVME_CC_SHN_NORMAL;
-
- writel(dev->ctrl_config, &dev->bar->cc);
-
- timeout = SHUTDOWN_TIMEOUT + jiffies;
- while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
- NVME_CSTS_SHST_CMPLT) {
- msleep(100);
- if (fatal_signal_pending(current))
- return -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(dev->dev,
- "Device shutdown incomplete; abort shutdown\n");
- return -ENODEV;
- }
- }
-
- return 0;
-}
-
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
+ .complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
@@ -1686,6 +1234,7 @@ static struct blk_mq_ops nvme_mq_admin_ops = {

static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
@@ -1695,19 +1244,23 @@ static struct blk_mq_ops nvme_mq_ops = {

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
- if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
- blk_cleanup_queue(dev->admin_q);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+ blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
- if (!dev->admin_q) {
+ if (!dev->ctrl.admin_q) {
dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1;
- dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
- dev->admin_tagset.reserved_tags = 1;
+
+ /*
+ * Subtract one to leave an empty queue entry for the 'Full Queue'
+ * condition. See the NVM-Express 1.2 specification, section 4.1.2.
+ */
+ dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1716,18 +1269,18 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->admin_tagset))
return -ENOMEM;

- dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->admin_q)) {
+ dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
+ if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM;
}
- if (!blk_get_queue(dev->admin_q)) {
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
nvme_dev_remove_admin(dev);
- dev->admin_q = NULL;
+ dev->ctrl.admin_q = NULL;
return -ENODEV;
}
} else
- blk_mq_unfreeze_queue(dev->admin_q);
+ blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);

return 0;
}
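
The queue_depth chosen above reflects two reservations against the 256-entry hardware admin queue: one slot held back from blk-mq for the driver-submitted Asynchronous Event Request, and one entry left permanently empty so a full queue can be told apart from an empty one. A worked example of the accounting, with the driver's macro values written out as plain numbers and all names invented for this sketch:

#include <stdio.h>

int main(void)
{
	int aq_depth = 256;	/* hardware admin queue entries */
	int aen_commands = 1;	/* slots hidden from blk-mq for AEN */

	/*
	 * One further entry stays empty so that head != tail can
	 * distinguish a full queue from an empty one (NVMe 1.2, 4.1.2).
	 */
	printf("blk-mq visible admin tags: %d\n",
	       aq_depth - aen_commands - 1);
	return 0;
}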
@@ -1736,31 +1289,17 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
u32 aqa;
- u64 cap = lo_hi_readq(&dev->bar->cap);
+ u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
- /*
- * default to a 4K page size, with the intention to update this
- * path in the future to accomodate architectures with differing
- * kernel and IO page sizes.
- */
- unsigned page_shift = 12;
- unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-
- if (page_shift < dev_page_min) {
- dev_err(dev->dev,
- "Minimum device page size (%u) too large for "
- "host (%u)\n", 1 << dev_page_min,
- 1 << page_shift);
- return -ENODEV;
- }

- dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
NVME_CAP_NSSRC(cap) : 0;

- if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
- writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+ if (dev->subsystem &&
+ (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
+ writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

- result = nvme_disable_ctrl(dev, cap);
+ result = nvme_disable_ctrl(&dev->ctrl, cap);
if (result < 0)
return result;

@@ -1774,18 +1313,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;

- dev->page_size = 1 << page_shift;
-
- dev->ctrl_config = NVME_CC_CSS_NVM;
- dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
- dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
- dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
-
- writel(aqa, &dev->bar->aqa);
- lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
- lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ writel(aqa, dev->bar + NVME_REG_AQA);
+ lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
+ lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

- result = nvme_enable_ctrl(dev, cap);
+ result = nvme_enable_ctrl(&dev->ctrl, cap);
if (result)
goto free_nvmeq;

@@ -1803,406 +1335,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}

-static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
-{
- struct nvme_dev *dev = ns->dev;
- struct nvme_user_io io;
- struct nvme_command c;
- unsigned length, meta_len;
- int status, write;
- dma_addr_t meta_dma = 0;
- void *meta = NULL;
- void __user *metadata;
-
- if (copy_from_user(&io, uio, sizeof(io)))
- return -EFAULT;
-
- switch (io.opcode) {
- case nvme_cmd_write:
- case nvme_cmd_read:
- case nvme_cmd_compare:
- break;
- default:
- return -EINVAL;
- }
-
- length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(uintptr_t)io.metadata;
- write = io.opcode & 1;
-
- if (ns->ext) {
- length += meta_len;
- meta_len = 0;
- }
- if (meta_len) {
- if (((io.metadata & 3) || !io.metadata) && !ns->ext)
- return -EINVAL;
-
- meta = dma_alloc_coherent(dev->dev, meta_len,
- &meta_dma, GFP_KERNEL);
-
- if (!meta) {
- status = -ENOMEM;
- goto unmap;
- }
- if (write) {
- if (copy_from_user(meta, metadata, meta_len)) {
- status = -EFAULT;
- goto unmap;
- }
- }
- }
-
- memset(&c, 0, sizeof(c));
- c.rw.opcode = io.opcode;
- c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(ns->ns_id);
- c.rw.slba = cpu_to_le64(io.slba);
- c.rw.length = cpu_to_le16(io.nblocks);
- c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
- c.rw.reftag = cpu_to_le32(io.reftag);
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
- c.rw.metadata = cpu_to_le64(meta_dma);
-
- status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
- (void __user *)(uintptr_t)io.addr, length, NULL, 0);
- unmap:
- if (meta) {
- if (status == NVME_SC_SUCCESS && !write) {
- if (copy_to_user(metadata, meta, meta_len))
- status = -EFAULT;
- }
- dma_free_coherent(dev->dev, meta_len, meta, meta_dma);
- }
- return status;
-}
-
-static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
- struct nvme_passthru_cmd __user *ucmd)
-{
- struct nvme_passthru_cmd cmd;
- struct nvme_command c;
- unsigned timeout = 0;
- int status;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
- return -EFAULT;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
- c.common.nsid = cpu_to_le32(cmd.nsid);
- c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
- c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
- c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
- c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
- c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
- c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
-
- status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
- NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- &cmd.result, timeout);
- if (status >= 0) {
- if (put_user(cmd.result, &ucmd->result))
- return -EFAULT;
- }
-
- return status;
-}
-
-static int nvme_subsys_reset(struct nvme_dev *dev)
-{
- if (!dev->subsystem)
- return -ENOTTY;
-
- writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
- return 0;
-}
-
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case NVME_IOCTL_ID:
- force_successful_syscall_return();
- return ns->ns_id;
- case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
- case NVME_IOCTL_IO_CMD:
- return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
- case NVME_IOCTL_SUBMIT_IO:
- return nvme_submit_io(ns, (void __user *)arg);
- case SG_GET_VERSION_NUM:
- return nvme_sg_get_version_num((void __user *)arg);
- case SG_IO:
- return nvme_sg_io(ns, (void __user *)arg);
- default:
- return -ENOTTY;
- }
-}
-
-#ifdef CONFIG_COMPAT
-static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SG_IO:
- return -ENOIOCTLCMD;
- }
- return nvme_ioctl(bdev, mode, cmd, arg);
-}
-#else
-#define nvme_compat_ioctl NULL
-#endif
-
-static void nvme_free_dev(struct kref *kref);
-static void nvme_free_ns(struct kref *kref)
-{
- struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
-
- if (ns->type == NVME_NS_LIGHTNVM)
- nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
-
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
-
- kref_put(&ns->dev->kref, nvme_free_dev);
- put_disk(ns->disk);
- kfree(ns);
-}
-
-static int nvme_open(struct block_device *bdev, fmode_t mode)
-{
- int ret = 0;
- struct nvme_ns *ns;
-
- spin_lock(&dev_list_lock);
- ns = bdev->bd_disk->private_data;
- if (!ns)
- ret = -ENXIO;
- else if (!kref_get_unless_zero(&ns->kref))
- ret = -ENXIO;
- spin_unlock(&dev_list_lock);
-
- return ret;
-}
-
-static void nvme_release(struct gendisk *disk, fmode_t mode)
-{
- struct nvme_ns *ns = disk->private_data;
- kref_put(&ns->kref, nvme_free_ns);
-}
-
-static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
-{
- /* some standard values */
- geo->heads = 1 << 6;
- geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
- return 0;
-}
-
-static void nvme_config_discard(struct nvme_ns *ns)
-{
- u32 logical_block_size = queue_logical_block_size(ns->queue);
- ns->queue->limits.discard_zeroes_data = 0;
- ns->queue->limits.discard_alignment = logical_block_size;
- ns->queue->limits.discard_granularity = logical_block_size;
- blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static int nvme_revalidate_disk(struct gendisk *disk)
-{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_dev *dev = ns->dev;
- struct nvme_id_ns *id;
- u8 lbaf, pi_type;
- u16 old_ms;
- unsigned short bs;
-
- if (nvme_identify_ns(dev, ns->ns_id, &id)) {
- dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__,
- dev->instance, ns->ns_id);
- return -ENODEV;
- }
- if (id->ncap == 0) {
- kfree(id);
- return -ENODEV;
- }
-
- if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
- if (nvme_nvm_register(ns->queue, disk->disk_name)) {
- dev_warn(dev->dev,
- "%s: LightNVM init failure\n", __func__);
- kfree(id);
- return -ENODEV;
- }
- ns->type = NVME_NS_LIGHTNVM;
- }
-
- old_ms = ns->ms;
- lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
- ns->lba_shift = id->lbaf[lbaf].ds;
- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
-
- /*
- * If identify namespace failed, use default 512 byte block size so
- * block layer can use before failing read/write for 0 capacity.
- */
- if (ns->lba_shift == 0)
- ns->lba_shift = 9;
- bs = 1 << ns->lba_shift;
-
- /* XXX: PI implementation requires metadata equal t10 pi tuple size */
- pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
- id->dps & NVME_NS_DPS_PI_MASK : 0;
-
- blk_mq_freeze_queue(disk->queue);
- if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
- ns->ms != old_ms ||
- bs != queue_logical_block_size(disk->queue) ||
- (ns->ms && ns->ext)))
- blk_integrity_unregister(disk);
-
- ns->pi_type = pi_type;
- blk_queue_logical_block_size(ns->queue, bs);
-
- if (ns->ms && !ns->ext)
- nvme_init_integrity(ns);
-
- if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
- !blk_get_integrity(disk)) ||
- ns->type == NVME_NS_LIGHTNVM)
- set_capacity(disk, 0);
- else
- set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
- if (dev->oncs & NVME_CTRL_ONCS_DSM)
- nvme_config_discard(ns);
- blk_mq_unfreeze_queue(disk->queue);
-
- kfree(id);
- return 0;
-}
-
-static char nvme_pr_type(enum pr_type type)
-{
- switch (type) {
- case PR_WRITE_EXCLUSIVE:
- return 1;
- case PR_EXCLUSIVE_ACCESS:
- return 2;
- case PR_WRITE_EXCLUSIVE_REG_ONLY:
- return 3;
- case PR_EXCLUSIVE_ACCESS_REG_ONLY:
- return 4;
- case PR_WRITE_EXCLUSIVE_ALL_REGS:
- return 5;
- case PR_EXCLUSIVE_ACCESS_ALL_REGS:
- return 6;
- default:
- return 0;
- }
-};
-
-static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
- u64 key, u64 sa_key, u8 op)
-{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
- struct nvme_command c;
- u8 data[16] = { 0, };
-
- put_unaligned_le64(key, &data[0]);
- put_unaligned_le64(sa_key, &data[8]);
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->ns_id);
- c.common.cdw10[0] = cpu_to_le32(cdw10);
-
- return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
-}
-
-static int nvme_pr_register(struct block_device *bdev, u64 old,
- u64 new, unsigned flags)
-{
- u32 cdw10;
-
- if (flags & ~PR_FL_IGNORE_KEY)
- return -EOPNOTSUPP;
-
- cdw10 = old ? 2 : 0;
- cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
- cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
- return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_reserve(struct block_device *bdev, u64 key,
- enum pr_type type, unsigned flags)
-{
- u32 cdw10;
-
- if (flags & ~PR_FL_IGNORE_KEY)
- return -EOPNOTSUPP;
-
- cdw10 = nvme_pr_type(type) << 8;
- cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
- enum pr_type type, bool abort)
-{
- u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
- return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_clear(struct block_device *bdev, u64 key)
-{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
-{
- u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-static const struct pr_ops nvme_pr_ops = {
- .pr_register = nvme_pr_register,
- .pr_reserve = nvme_pr_reserve,
- .pr_release = nvme_pr_release,
- .pr_preempt = nvme_pr_preempt,
- .pr_clear = nvme_pr_clear,
-};
-
-static const struct block_device_operations nvme_fops = {
- .owner = THIS_MODULE,
- .ioctl = nvme_ioctl,
- .compat_ioctl = nvme_compat_ioctl,
- .open = nvme_open,
- .release = nvme_release,
- .getgeo = nvme_getgeo,
- .revalidate_disk= nvme_revalidate_disk,
- .pr_ops = &nvme_pr_ops,
-};
-
static int nvme_kthread(void *data)
{
struct nvme_dev *dev, *next;
@@ -2212,14 +1344,20 @@ static int nvme_kthread(void *data)
spin_lock(&dev_list_lock);
list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
- u32 csts = readl(&dev->bar->csts);
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+ /*
+ * Skip controllers currently under reset.
+ */
+ if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
+ continue;

if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
csts & NVME_CSTS_CFS) {
- if (!__nvme_reset(dev)) {
+ if (queue_work(nvme_workq, &dev->reset_work)) {
dev_warn(dev->dev,
"Failed status: %x, reset controller\n",
- readl(&dev->bar->csts));
+ readl(dev->bar + NVME_REG_CSTS));
}
continue;
}
@@ -2230,11 +1368,8 @@ static int nvme_kthread(void *data)
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);

- while ((i == 0) && (dev->event_limit > 0)) {
- if (nvme_submit_async_admin_req(dev))
- break;
- dev->event_limit--;
- }
+ while (i == 0 && dev->ctrl.event_limit > 0)
+ nvme_submit_async_event(dev);
spin_unlock_irq(&nvmeq->q_lock);
}
}
@@ -2244,127 +1379,33 @@ static int nvme_kthread(void *data)
return 0;
}
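
The event_limit test in the loop above acts as a credit counter for outstanding Asynchronous Event Requests. A toy model of that budget follows; it assumes, as this sketch does and the diff does not show, that submitting an AEN consumes one credit which the completion path later restores. All names are invented:

#include <stdio.h>

static int event_limit = 1;	/* one credit, matching NVME_NR_AEN_COMMANDS */

static void submit_async_event(void)
{
	/* One AEN is now outstanding on the admin queue. */
	event_limit--;
	printf("AEN submitted, credits left: %d\n", event_limit);
}

int main(void)
{
	while (event_limit > 0)	/* mirrors the resubmission loop above */
		submit_async_event();
	return 0;
}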

-static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
-{
- struct nvme_ns *ns;
- struct gendisk *disk;
- int node = dev_to_node(dev->dev);
-
- ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
- if (!ns)
- return;
-
- ns->queue = blk_mq_init_queue(&dev->tagset);
- if (IS_ERR(ns->queue))
- goto out_free_ns;
- queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
- ns->dev = dev;
- ns->queue->queuedata = ns;
-
- disk = alloc_disk_node(0, node);
- if (!disk)
- goto out_free_queue;
-
- kref_init(&ns->kref);
- ns->ns_id = nsid;
- ns->disk = disk;
- ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
- list_add_tail(&ns->list, &dev->namespaces);
-
- blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- if (dev->max_hw_sectors) {
- blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
- blk_queue_max_segments(ns->queue,
- (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
- }
- if (dev->stripe_size)
- blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
- if (dev->vwc & NVME_CTRL_VWC_PRESENT)
- blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
- blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
-
- disk->major = nvme_major;
- disk->first_minor = 0;
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->driverfs_dev = dev->device;
- disk->flags = GENHD_FL_EXT_DEVT;
- sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-
- /*
- * Initialize capacity to 0 until we establish the namespace format and
- * setup integrity extentions if necessary. The revalidate_disk after
- * add_disk allows the driver to register with integrity if the format
- * requires it.
- */
- set_capacity(disk, 0);
- if (nvme_revalidate_disk(ns->disk))
- goto out_free_disk;
-
- kref_get(&dev->kref);
- if (ns->type != NVME_NS_LIGHTNVM) {
- add_disk(ns->disk);
- if (ns->ms) {
- struct block_device *bd = bdget_disk(ns->disk, 0);
- if (!bd)
- return;
- if (blkdev_get(bd, FMODE_READ, NULL)) {
- bdput(bd);
- return;
- }
- blkdev_reread_part(bd);
- blkdev_put(bd, FMODE_READ);
- }
- }
- return;
- out_free_disk:
- kfree(disk);
- list_del(&ns->list);
- out_free_queue:
- blk_cleanup_queue(ns->queue);
- out_free_ns:
- kfree(ns);
-}
-
-/*
- * Create I/O queues. Failing to create an I/O queue is not an issue,
- * we can continue with less than the desired amount of queues, and
- * even a controller without I/O queues an still be used to issue
- * admin commands. This might be useful to upgrade a buggy firmware
- * for example.
- */
-static void nvme_create_io_queues(struct nvme_dev *dev)
+static int nvme_create_io_queues(struct nvme_dev *dev)
{
unsigned i;
+ int ret = 0;

- for (i = dev->queue_count; i <= dev->max_qid; i++)
- if (!nvme_alloc_queue(dev, i, dev->q_depth))
+ for (i = dev->queue_count; i <= dev->max_qid; i++) {
+ if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
+ ret = -ENOMEM;
break;
+ }
+ }

- for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
- if (nvme_create_queue(dev->queues[i], i)) {
+ for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+ ret = nvme_create_queue(dev->queues[i], i);
+ if (ret) {
nvme_free_queues(dev, i);
break;
}
-}
-
-static int set_queue_count(struct nvme_dev *dev, int count)
-{
- int status;
- u32 result;
- u32 q_count = (count - 1) | ((count - 1) << 16);
-
- status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
- &result);
- if (status < 0)
- return status;
- if (status > 0) {
- dev_err(dev->dev, "Could not set queue count (%d)\n", status);
- return 0;
}
- return min(result & 0xffff, result >> 16) + 1;
+
+ /*
+ * Ignore failing Create SQ/CQ commands; we can continue with less
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
+ * be useful to upgrade a buggy firmware for example.
+ */
+ return ret >= 0 ? 0 : ret;
}

static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
@@ -2379,11 +1420,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (!use_cmb_sqes)
return NULL;

- dev->cmbsz = readl(&dev->bar->cmbsz);
+ dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
return NULL;

- cmbloc = readl(&dev->bar->cmbloc);
+ cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
@@ -2431,11 +1472,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
int result, i, vecs, nr_io_queues, size;

nr_io_queues = num_possible_cpus();
- result = set_queue_count(dev, nr_io_queues);
- if (result <= 0)
+ result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
+ if (result < 0)
return result;
- if (result < nr_io_queues)
- nr_io_queues = result;
+
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+ * access to the admin queue, as that might be the only way to fix
+ * them up.
+ */
+ if (result > 0) {
+ dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+ nr_io_queues = 0;
+ result = 0;
+ }

if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2457,7 +1507,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return -ENOMEM;
size = db_bar_size(dev, nr_io_queues);
} while (1);
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->dbs = dev->bar + 4096;
adminq->q_db = dev->dbs;
}

@@ -2501,115 +1551,115 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)

/* Free previously allocated queues that are no longer usable */
nvme_free_queues(dev, nr_io_queues + 1);
- nvme_create_io_queues(dev);
-
- return 0;
+ return nvme_create_io_queues(dev);

free_queues:
nvme_free_queues(dev, 1);
return result;
}

-static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
+static void nvme_set_irq_hints(struct nvme_dev *dev)
{
- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+ struct nvme_queue *nvmeq;
+ int i;

- return nsa->ns_id - nsb->ns_id;
-}
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];

-static struct nvme_ns *nvme_find_ns(struct nvme_dev *dev, unsigned nsid)
-{
- struct nvme_ns *ns;
+ if (!nvmeq->tags || !(*nvmeq->tags))
+ continue;

- list_for_each_entry(ns, &dev->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
- if (ns->ns_id > nsid)
- break;
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ blk_mq_tags_cpumask(*nvmeq->tags));
}
- return NULL;
}

-static inline bool nvme_io_incapable(struct nvme_dev *dev)
+static void nvme_dev_scan(struct work_struct *work)
{
- return (!dev->bar || readl(&dev->bar->csts) & NVME_CSTS_CFS ||
- dev->online_queues < 2);
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
+
+ if (!dev->tagset.tags)
+ return;
+ nvme_scan_namespaces(&dev->ctrl);
+ nvme_set_irq_hints(dev);
}

-static void nvme_ns_remove(struct nvme_ns *ns)
+static void nvme_del_queue_end(struct request *req, int error)
{
- bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
-
- if (kill) {
- blk_set_queue_dying(ns->queue);
+ struct nvme_queue *nvmeq = req->end_io_data;

- /*
- * The controller was shutdown first if we got here through
- * device removal. The shutdown may requeue outstanding
- * requests. These need to be aborted immediately so
- * del_gendisk doesn't block indefinitely for their completion.
- */
- blk_mq_abort_requeue_list(ns->queue);
- }
- if (ns->disk->flags & GENHD_FL_UP)
- del_gendisk(ns->disk);
- if (kill || !blk_queue_dying(ns->queue)) {
- blk_mq_abort_requeue_list(ns->queue);
- blk_cleanup_queue(ns->queue);
- }
- list_del_init(&ns->list);
- kref_put(&ns->kref, nvme_free_ns);
+ blk_mq_free_request(req);
+ complete(&nvmeq->dev->ioq_wait);
}

-static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
+static void nvme_del_cq_end(struct request *req, int error)
{
- struct nvme_ns *ns, *next;
- unsigned i;
+ struct nvme_queue *nvmeq = req->end_io_data;

- for (i = 1; i <= nn; i++) {
- ns = nvme_find_ns(dev, i);
- if (ns) {
- if (revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
- } else
- nvme_alloc_ns(dev, i);
- }
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- if (ns->ns_id > nn)
- nvme_ns_remove(ns);
+ if (!error) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ nvme_process_cq(nvmeq);
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
}
- list_sort(NULL, &dev->namespaces, ns_cmp);
+
+ nvme_del_queue_end(req, error);
}

-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
- struct nvme_queue *nvmeq;
- int i;
+ struct request_queue *q = nvmeq->dev->ctrl.admin_q;
+ struct request *req;
+ struct nvme_command cmd;

- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.delete_queue.opcode = opcode;
+ cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return PTR_ERR(req);

- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
+ req->timeout = ADMIN_TIMEOUT;
+ req->end_io_data = nvmeq;
+
+ blk_execute_rq_nowait(q, NULL, req, false,
+ opcode == nvme_admin_delete_cq ?
+ nvme_del_cq_end : nvme_del_queue_end);
+ return 0;
}

-static void nvme_dev_scan(struct work_struct *work)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
- struct nvme_id_ctrl *ctrl;
+ int pass;
+ unsigned long timeout;
+ u8 opcode = nvme_admin_delete_sq;

- if (!dev->tagset.tags)
- return;
- if (nvme_identify_ctrl(dev, &ctrl))
- return;
- nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
- kfree(ctrl);
- nvme_set_irq_hints(dev);
+ for (pass = 0; pass < 2; pass++) {
+ int sent = 0, i = dev->queue_count - 1;
+
+ reinit_completion(&dev->ioq_wait);
+ retry:
+ timeout = ADMIN_TIMEOUT;
+ for (; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (!pass)
+ nvme_suspend_queue(nvmeq);
+ if (nvme_delete_queue(nvmeq, opcode))
+ break;
+ ++sent;
+ }
+ while (sent--) {
+ timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
+ if (timeout == 0)
+ return;
+ if (i)
+ goto retry;
+ }
+ opcode = nvme_admin_delete_cq;
+ }
}
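
nvme_disable_io_queues() above tears queues down in two passes (all Delete SQ commands first, then all Delete CQ), firing each command asynchronously, counting how many went out, and waiting for exactly that many completions. A userspace analogue of the sequencing, with a counting semaphore in place of struct completion and threads standing in for the controller; all names here are invented:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NR_QUEUES 4

static sem_t ioq_wait;

/* Stand-in for the controller completing one Delete SQ/CQ command. */
static void *fake_controller(void *arg)
{
	sem_post(&ioq_wait);
	return NULL;
}

static void delete_pass(const char *opcode)
{
	pthread_t th[NR_QUEUES];
	int sent = 0, i;

	/* Fire one asynchronous delete per queue, counting submissions. */
	for (i = 0; i < NR_QUEUES; i++, sent++)
		pthread_create(&th[i], NULL, fake_controller, NULL);

	/* Wait for exactly one completion per command we sent. */
	while (sent--)
		sem_wait(&ioq_wait);
	for (i = 0; i < NR_QUEUES; i++)
		pthread_join(th[i], NULL);
	printf("%s pass done\n", opcode);
}

int main(void)
{
	sem_init(&ioq_wait, 0, 0);
	delete_pass("delete-sq");	/* first pass: submission queues */
	delete_pass("delete-cq");	/* second pass: completion queues */
	sem_destroy(&ioq_wait);
	return 0;
}

Build with cc -pthread; the driver version additionally bounds each wait by ADMIN_TIMEOUT and retries any queues it could not submit.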

/*
@@ -2620,42 +1670,7 @@ static void nvme_dev_scan(struct work_struct *work)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int res;
- struct nvme_id_ctrl *ctrl;
- int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
-
- res = nvme_identify_ctrl(dev, &ctrl);
- if (res) {
- dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
- return -EIO;
- }
-
- dev->oncs = le16_to_cpup(&ctrl->oncs);
- dev->abort_limit = ctrl->acl + 1;
- dev->vwc = ctrl->vwc;
- memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
- memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
- memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- if (ctrl->mdts)
- dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- else
- dev->max_hw_sectors = UINT_MAX;
- if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
- (pdev->device == 0x0953) && ctrl->vs[3]) {
- unsigned int max_hw_sectors;
-
- dev->stripe_size = 1 << (ctrl->vs[3] + shift);
- max_hw_sectors = dev->stripe_size >> (shift - 9);
- if (dev->max_hw_sectors) {
- dev->max_hw_sectors = min(max_hw_sectors,
- dev->max_hw_sectors);
- } else
- dev->max_hw_sectors = max_hw_sectors;
- }
- kfree(ctrl);
-
- if (!dev->tagset.tags) {
+ if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2668,8 +1683,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->tagset))
return 0;
+ dev->ctrl.tagset = &dev->tagset;
}
- schedule_work(&dev->scan_work);
+ queue_work(nvme_workq, &dev->scan_work);
return 0;
}

@@ -2699,7 +1715,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!dev->bar)
goto disable;

- if (readl(&dev->bar->csts) == -1) {
+ if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
goto unmap;
}
@@ -2714,10 +1730,11 @@ static int nvme_dev_map(struct nvme_dev *dev)
goto unmap;
}

- cap = lo_hi_readq(&dev->bar->cap);
+ cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
+
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->dbs = dev->bar + 4096;

/*
* Temporary fix for the Apple controller found in the MacBook8,1 and
@@ -2730,9 +1747,11 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->q_depth);
}

- if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
dev->cmb = nvme_map_cmb(dev);

+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
return 0;

unmap:
@@ -2760,152 +1779,34 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
pci_release_regions(pdev);
}

- if (pci_is_enabled(pdev))
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
-}
-
-struct nvme_delq_ctx {
- struct task_struct *waiter;
- struct kthread_worker *worker;
- atomic_t refcount;
-};
-
-static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
-{
- dq->waiter = current;
- mb();
-
- for (;;) {
- set_current_state(TASK_KILLABLE);
- if (!atomic_read(&dq->refcount))
- break;
- if (!schedule_timeout(ADMIN_TIMEOUT) ||
- fatal_signal_pending(current)) {
- /*
- * Disable the controller first since we can't trust it
- * at this point, but leave the admin queue enabled
- * until all queue deletion requests are flushed.
- * FIXME: This may take a while if there are more h/w
- * queues than admin tags.
- */
- set_current_state(TASK_RUNNING);
- nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
- nvme_clear_queue(dev->queues[0]);
- flush_kthread_worker(dq->worker);
- nvme_disable_queue(dev, 0);
- return;
- }
}
- set_current_state(TASK_RUNNING);
-}
-
-static void nvme_put_dq(struct nvme_delq_ctx *dq)
-{
- atomic_dec(&dq->refcount);
- if (dq->waiter)
- wake_up_process(dq->waiter);
-}
-
-static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
-{
- atomic_inc(&dq->refcount);
- return dq;
-}
-
-static void nvme_del_queue_end(struct nvme_queue *nvmeq)
-{
- struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
- nvme_put_dq(dq);
-
- spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
-}
-
-static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
- kthread_work_func_t fn)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.delete_queue.opcode = opcode;
- c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
-
- init_kthread_work(&nvmeq->cmdinfo.work, fn);
- return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
- ADMIN_TIMEOUT);
-}
-
-static void nvme_del_cq_work_handler(struct kthread_work *work)
-{
- struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
- cmdinfo.work);
- nvme_del_queue_end(nvmeq);
-}
-
-static int nvme_delete_cq(struct nvme_queue *nvmeq)
-{
- return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
- nvme_del_cq_work_handler);
}

-static void nvme_del_sq_work_handler(struct kthread_work *work)
+static int nvme_dev_list_add(struct nvme_dev *dev)
{
- struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
- cmdinfo.work);
- int status = nvmeq->cmdinfo.status;
-
- if (!status)
- status = nvme_delete_cq(nvmeq);
- if (status)
- nvme_del_queue_end(nvmeq);
-}
-
-static int nvme_delete_sq(struct nvme_queue *nvmeq)
-{
- return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
- nvme_del_sq_work_handler);
-}
-
-static void nvme_del_queue_start(struct kthread_work *work)
-{
- struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
- cmdinfo.work);
- if (nvme_delete_sq(nvmeq))
- nvme_del_queue_end(nvmeq);
-}
+ bool start_thread = false;

-static void nvme_disable_io_queues(struct nvme_dev *dev)
-{
- int i;
- DEFINE_KTHREAD_WORKER_ONSTACK(worker);
- struct nvme_delq_ctx dq;
- struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
- &worker, "nvme%d", dev->instance);
-
- if (IS_ERR(kworker_task)) {
- dev_err(dev->dev,
- "Failed to create queue del task\n");
- for (i = dev->queue_count - 1; i > 0; i--)
- nvme_disable_queue(dev, i);
- return;
+ spin_lock(&dev_list_lock);
+ if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+ start_thread = true;
+ nvme_thread = NULL;
}
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);

- dq.waiter = NULL;
- atomic_set(&dq.refcount, 0);
- dq.worker = &worker;
- for (i = dev->queue_count - 1; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
+ if (start_thread) {
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ wake_up_all(&nvme_kthread_wait);
+ } else
+ wait_event_killable(nvme_kthread_wait, nvme_thread);

- if (nvme_suspend_queue(nvmeq))
- continue;
- nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
- nvmeq->cmdinfo.worker = dq.worker;
- init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
- queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
- }
- nvme_wait_dq(&dq, dev);
- kthread_stop(kworker_task);
+ if (IS_ERR_OR_NULL(nvme_thread))
+ return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+
+ return 0;
}
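
nvme_dev_list_add() above has the first probed device start the one shared polling thread, while later devices find it already running (and, in the driver, wait on nvme_kthread_wait for the pointer to be published). A condensed userspace model of the first-wins decision, with pthread primitives standing in for the kernel's spinlock; everything here is invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_devices;
static bool thread_running;

static void dev_list_add(void)
{
	bool start_thread = false;

	pthread_mutex_lock(&dev_list_lock);
	if (nr_devices++ == 0 && !thread_running) {
		start_thread = true;	/* we are the first device */
		thread_running = true;	/* claim it before unlocking */
	}
	pthread_mutex_unlock(&dev_list_lock);

	if (start_thread)
		printf("started shared poll thread\n");	/* kthread_run() */
}

int main(void)
{
	dev_list_add();
	dev_list_add();		/* second device reuses the running thread */
	return 0;
}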

/*
@@ -2928,44 +1829,17 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
kthread_stop(tmp);
}

-static void nvme_freeze_queues(struct nvme_dev *dev)
-{
- struct nvme_ns *ns;
-
- list_for_each_entry(ns, &dev->namespaces, list) {
- blk_mq_freeze_queue_start(ns->queue);
-
- spin_lock_irq(ns->queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
- spin_unlock_irq(ns->queue->queue_lock);
-
- blk_mq_cancel_requeue_work(ns->queue);
- blk_mq_stop_hw_queues(ns->queue);
- }
-}
-
-static void nvme_unfreeze_queues(struct nvme_dev *dev)
-{
- struct nvme_ns *ns;
-
- list_for_each_entry(ns, &dev->namespaces, list) {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
- blk_mq_unfreeze_queue(ns->queue);
- blk_mq_start_stopped_hw_queues(ns->queue, true);
- blk_mq_kick_requeue_list(ns->queue);
- }
-}
-
-static void nvme_dev_shutdown(struct nvme_dev *dev)
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
int i;
u32 csts = -1;

nvme_dev_list_remove(dev);

+ mutex_lock(&dev->shutdown_lock);
if (dev->bar) {
- nvme_freeze_queues(dev);
- csts = readl(&dev->bar->csts);
+ nvme_stop_queues(&dev->ctrl);
+ csts = readl(dev->bar + NVME_REG_CSTS);
}
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
@@ -2974,30 +1848,13 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
}
} else {
nvme_disable_io_queues(dev);
- nvme_shutdown_ctrl(dev);
- nvme_disable_queue(dev, 0);
+ nvme_disable_admin_queue(dev, shutdown);
}
nvme_dev_unmap(dev);

for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
-}
-
-static void nvme_dev_remove(struct nvme_dev *dev)
-{
- struct nvme_ns *ns, *next;
-
- if (nvme_io_incapable(dev)) {
- /*
- * If the device is not capable of IO (surprise hot-removal,
- * for example), we need to quiesce prior to deleting the
- * namespaces. This will end outstanding requests and prevent
- * attempts to sync dirty data.
- */
- nvme_dev_shutdown(dev);
- }
- list_for_each_entry_safe(ns, next, &dev->namespaces, list)
- nvme_ns_remove(ns);
+ mutex_unlock(&dev->shutdown_lock);
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -3023,119 +1880,36 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_small_pool);
}

-static DEFINE_IDA(nvme_instance_ida);
-
-static int nvme_set_instance(struct nvme_dev *dev)
-{
- int instance, error;
-
- do {
- if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
- return -ENODEV;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_instance_ida, &instance);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- return -ENODEV;
-
- dev->instance = instance;
- return 0;
-}
-
-static void nvme_release_instance(struct nvme_dev *dev)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_instance_ida, dev->instance);
- spin_unlock(&dev_list_lock);
-}
-
-static void nvme_free_dev(struct kref *kref)
+static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
- struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+ struct nvme_dev *dev = to_nvme_dev(ctrl);

put_device(dev->dev);
- put_device(dev->device);
- nvme_release_instance(dev);
if (dev->tagset.tags)
blk_mq_free_tag_set(&dev->tagset);
- if (dev->admin_q)
- blk_put_queue(dev->admin_q);
+ if (dev->ctrl.admin_q)
+ blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
}

-static int nvme_dev_open(struct inode *inode, struct file *f)
-{
- struct nvme_dev *dev;
- int instance = iminor(inode);
- int ret = -ENODEV;
-
- spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
- if (dev->instance == instance) {
- if (!dev->admin_q) {
- ret = -EWOULDBLOCK;
- break;
- }
- if (!kref_get_unless_zero(&dev->kref))
- break;
- f->private_data = dev;
- ret = 0;
- break;
- }
- }
- spin_unlock(&dev_list_lock);
-
- return ret;
-}
-
-static int nvme_dev_release(struct inode *inode, struct file *f)
+static void nvme_reset_work(struct work_struct *work)
{
- struct nvme_dev *dev = f->private_data;
- kref_put(&dev->kref, nvme_free_dev);
- return 0;
-}
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+ int result;

-static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct nvme_dev *dev = f->private_data;
- struct nvme_ns *ns;
-
- switch (cmd) {
- case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(dev, NULL, (void __user *)arg);
- case NVME_IOCTL_IO_CMD:
- if (list_empty(&dev->namespaces))
- return -ENOTTY;
- ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
- return nvme_user_cmd(dev, ns, (void __user *)arg);
- case NVME_IOCTL_RESET:
- dev_warn(dev->dev, "resetting controller\n");
- return nvme_reset(dev);
- case NVME_IOCTL_SUBSYS_RESET:
- return nvme_subsys_reset(dev);
- default:
- return -ENOTTY;
- }
-}
+ if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
+ goto out;

-static const struct file_operations nvme_dev_fops = {
- .owner = THIS_MODULE,
- .open = nvme_dev_open,
- .release = nvme_dev_release,
- .unlocked_ioctl = nvme_dev_ioctl,
- .compat_ioctl = nvme_dev_ioctl,
-};
+ /*
+ * If we're called to reset a live controller, first shut it down
+ * before moving on.
+ */
+ if (dev->bar)
+ nvme_dev_disable(dev, false);

-static void nvme_probe_work(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
- bool start_thread = false;
- int result;
+ set_bit(NVME_CTRL_RESETTING, &dev->flags);

result = nvme_dev_map(dev);
if (result)
@@ -3145,35 +1919,24 @@ static void nvme_probe_work(struct work_struct *work)
if (result)
goto unmap;

- spin_lock(&dev_list_lock);
- if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
- start_thread = true;
- nvme_thread = NULL;
- }
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
- if (start_thread) {
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- wake_up_all(&nvme_kthread_wait);
- } else
- wait_event_killable(nvme_kthread_wait, nvme_thread);
-
- if (IS_ERR_OR_NULL(nvme_thread)) {
- result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
- goto disable;
- }
-
nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
goto disable;

+ result = nvme_init_identify(&dev->ctrl);
+ if (result)
+ goto free_tags;
+
result = nvme_setup_io_queues(dev);
if (result)
goto free_tags;

- dev->event_limit = 1;
+ dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+
+ result = nvme_dev_list_add(dev);
+ if (result)
+ goto remove;

/*
* Keep the controller around but remove all namespaces if we don't have
@@ -3181,117 +1944,98 @@ static void nvme_probe_work(struct work_struct *work)
*/
if (dev->online_queues < 2) {
dev_warn(dev->dev, "IO queues not created\n");
- nvme_dev_remove(dev);
+ nvme_remove_namespaces(&dev->ctrl);
} else {
- nvme_unfreeze_queues(dev);
+ nvme_start_queues(&dev->ctrl);
nvme_dev_add(dev);
}

+ clear_bit(NVME_CTRL_RESETTING, &dev->flags);
return;

+ remove:
+ nvme_dev_list_remove(dev);
free_tags:
nvme_dev_remove_admin(dev);
- blk_put_queue(dev->admin_q);
- dev->admin_q = NULL;
+ blk_put_queue(dev->ctrl.admin_q);
+ dev->ctrl.admin_q = NULL;
dev->queues[0]->tags = NULL;
disable:
- nvme_disable_queue(dev, 0);
- nvme_dev_list_remove(dev);
+ nvme_disable_admin_queue(dev, false);
unmap:
nvme_dev_unmap(dev);
out:
- if (!work_busy(&dev->reset_work))
- nvme_dead_ctrl(dev);
+ nvme_remove_dead_ctrl(dev);
}

-static int nvme_remove_dead_ctrl(void *arg)
+static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
- struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
struct pci_dev *pdev = to_pci_dev(dev->dev);

if (pci_get_drvdata(pdev))
pci_stop_and_remove_bus_device_locked(pdev);
- kref_put(&dev->kref, nvme_free_dev);
- return 0;
+ nvme_put_ctrl(&dev->ctrl);
}

-static void nvme_dead_ctrl(struct nvme_dev *dev)
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
- dev_warn(dev->dev, "Device failed to resume\n");
- kref_get(&dev->kref);
- if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
- dev->instance))) {
- dev_err(dev->dev,
- "Failed to start controller remove task\n");
- kref_put(&dev->kref, nvme_free_dev);
- }
+ dev_warn(dev->dev, "Removing after probe failure\n");
+ kref_get(&dev->ctrl.kref);
+ if (!schedule_work(&dev->remove_work))
+ nvme_put_ctrl(&dev->ctrl);
}
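
One detail worth noting in nvme_remove_dead_ctrl() above is the reference discipline: the controller is pinned with kref_get() before the work is scheduled, and the new reference is dropped on the spot if schedule_work() reports the item already queued, so the work handler is owed exactly one put. A userspace model of that invariant, with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcount = 1;	/* reference held by the probe path */
static bool work_queued;

static void put_ctrl(void)
{
	if (atomic_fetch_sub(&refcount, 1) == 1)
		printf("last reference dropped: freeing controller\n");
}

static bool schedule_remove_work(void)
{
	/* schedule_work() returns false when the item is already pending. */
	bool first = !work_queued;

	work_queued = true;
	return first;
}

static void remove_dead_ctrl(void)
{
	atomic_fetch_add(&refcount, 1);	/* keep ctrl alive for the work item */
	if (!schedule_remove_work())
		put_ctrl();		/* work already pending: drop our ref */
}

int main(void)
{
	remove_dead_ctrl();
	remove_dead_ctrl();	/* a second call must not leak a reference */
	put_ctrl();		/* the work item's reference */
	put_ctrl();		/* the probe path's reference */
	return 0;
}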

-static void nvme_reset_work(struct work_struct *ws)
+static int nvme_reset(struct nvme_dev *dev)
{
- struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
- bool in_probe = work_busy(&dev->probe_work);
-
- nvme_dev_shutdown(dev);
+ if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
+ return -ENODEV;

- /* Synchronize with device probe so that work will see failure status
- * and exit gracefully without trying to schedule another reset */
- flush_work(&dev->probe_work);
+ if (!queue_work(nvme_workq, &dev->reset_work))
+ return -EBUSY;

- /* Fail this device if reset occured during probe to avoid
- * infinite initialization loops. */
- if (in_probe) {
- nvme_dead_ctrl(dev);
- return;
- }
- /* Schedule device resume asynchronously so the reset work is available
- * to cleanup errors that may occur during reinitialization */
- schedule_work(&dev->probe_work);
+ flush_work(&dev->reset_work);
+ return 0;
}

-static int __nvme_reset(struct nvme_dev *dev)
+static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
- if (work_pending(&dev->reset_work))
- return -EBUSY;
- list_del_init(&dev->node);
- queue_work(nvme_workq, &dev->reset_work);
+ *val = readl(to_nvme_dev(ctrl)->bar + off);
return 0;
}

-static int nvme_reset(struct nvme_dev *dev)
+static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
- int ret;
-
- if (!dev->admin_q || blk_queue_dying(dev->admin_q))
- return -ENODEV;
-
- spin_lock(&dev_list_lock);
- ret = __nvme_reset(dev);
- spin_unlock(&dev_list_lock);
-
- if (!ret) {
- flush_work(&dev->reset_work);
- flush_work(&dev->probe_work);
- return 0;
- }
+ writel(val, to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}

- return ret;
+static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = readq(to_nvme_dev(ctrl)->bar + off);
+ return 0;
}

-static ssize_t nvme_sysfs_reset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
{
- struct nvme_dev *ndev = dev_get_drvdata(dev);
- int ret;
+ struct nvme_dev *dev = to_nvme_dev(ctrl);

- ret = nvme_reset(ndev);
- if (ret < 0)
- return ret;
+ return !dev->bar || dev->online_queues < 2;
+}

- return count;
+static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+ return nvme_reset(to_nvme_dev(ctrl));
}
-static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+
+static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .reg_read32 = nvme_pci_reg_read32,
+ .reg_write32 = nvme_pci_reg_write32,
+ .reg_read64 = nvme_pci_reg_read64,
+ .io_incapable = nvme_pci_io_incapable,
+ .reset_ctrl = nvme_pci_reset_ctrl,
+ .free_ctrl = nvme_pci_free_ctrl,
+};
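
The nvme_ctrl_ops table registered above is the seam between the transport-neutral core and this PCI driver: the core only ever touches controller registers through these function pointers. A minimal sketch of the same indirection, assuming a made-up register value in place of a real MMIO read (0x1c is the CSTS offset in the NVMe register map):

#include <stdint.h>
#include <stdio.h>

struct nvme_ctrl;

struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, uint32_t off, uint32_t *val);
};

struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
};

/* PCIe flavour: in the real driver this would be a BAR readl(). */
static int pci_reg_read32(struct nvme_ctrl *ctrl, uint32_t off, uint32_t *val)
{
	*val = 0x1;	/* pretend CSTS.RDY is set */
	return 0;
}

static const struct nvme_ctrl_ops pci_ops = {
	.reg_read32 = pci_reg_read32,
};

int main(void)
{
	struct nvme_ctrl ctrl = { .ops = &pci_ops };
	uint32_t csts;

	/* Core-style code: no knowledge of how the read is performed. */
	if (!ctrl.ops->reg_read32(&ctrl, 0x1c, &csts))
		printf("CSTS = %#x\n", csts);
	return 0;
}

A fabrics transport can later supply its own ops table and reuse the core unchanged, which is the point of the split.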
|
|
|
|
|
|
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
|
{
|
|
@@ -3314,46 +2058,30 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
|
if (!dev->queues)
|
|
|
goto free;
|
|
|
|
|
|
- INIT_LIST_HEAD(&dev->namespaces);
|
|
|
- INIT_WORK(&dev->reset_work, nvme_reset_work);
|
|
|
dev->dev = get_device(&pdev->dev);
|
|
|
pci_set_drvdata(pdev, dev);
|
|
|
- result = nvme_set_instance(dev);
|
|
|
- if (result)
|
|
|
- goto put_pci;
|
|
|
+
|
|
|
+ INIT_LIST_HEAD(&dev->node);
|
|
|
+ INIT_WORK(&dev->scan_work, nvme_dev_scan);
|
|
|
+ INIT_WORK(&dev->reset_work, nvme_reset_work);
|
|
|
+ INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
|
|
|
+ mutex_init(&dev->shutdown_lock);
|
|
|
+ init_completion(&dev->ioq_wait);
|
|
|
|
|
|
result = nvme_setup_prp_pools(dev);
|
|
|
if (result)
|
|
|
- goto release;
|
|
|
-
|
|
|
- kref_init(&dev->kref);
|
|
|
- dev->device = device_create(nvme_class, &pdev->dev,
|
|
|
- MKDEV(nvme_char_major, dev->instance),
|
|
|
- dev, "nvme%d", dev->instance);
|
|
|
- if (IS_ERR(dev->device)) {
|
|
|
- result = PTR_ERR(dev->device);
|
|
|
- goto release_pools;
|
|
|
- }
|
|
|
- get_device(dev->device);
|
|
|
- dev_set_drvdata(dev->device, dev);
|
|
|
+ goto put_pci;
|
|
|
|
|
|
- result = device_create_file(dev->device, &dev_attr_reset_controller);
|
|
|
+ result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
|
|
|
+ id->driver_data);
|
|
|
if (result)
|
|
|
- goto put_dev;
|
|
|
+ goto release_pools;
|
|
|
|
|
|
- INIT_LIST_HEAD(&dev->node);
|
|
|
- INIT_WORK(&dev->scan_work, nvme_dev_scan);
|
|
|
- INIT_WORK(&dev->probe_work, nvme_probe_work);
|
|
|
- schedule_work(&dev->probe_work);
|
|
|
+	queue_work(nvme_workq, &dev->reset_work);
 	return 0;

- put_dev:
-	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
-	put_device(dev->device);
  release_pools:
 	nvme_release_prp_pools(dev);
- release:
-	nvme_release_instance(dev);
  put_pci:
 	put_device(dev->dev);
  free:
@@ -3368,15 +2096,15 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	struct nvme_dev *dev = pci_get_drvdata(pdev);

 	if (prepare)
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 	else
-		schedule_work(&dev->probe_work);
+		queue_work(nvme_workq, &dev->reset_work);
 }

 static void nvme_shutdown(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
-	nvme_dev_shutdown(dev);
+	nvme_dev_disable(dev, true);
 }

 static void nvme_remove(struct pci_dev *pdev)
@@ -3388,34 +2116,25 @@ static void nvme_remove(struct pci_dev *pdev)
 	spin_unlock(&dev_list_lock);

 	pci_set_drvdata(pdev, NULL);
-	flush_work(&dev->probe_work);
 	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
-	device_remove_file(dev->device, &dev_attr_reset_controller);
-	nvme_dev_remove(dev);
-	nvme_dev_shutdown(dev);
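+	/* Remove namespaces first, while the controller can still service I/O. */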
+	nvme_remove_namespaces(&dev->ctrl);
+	nvme_uninit_ctrl(&dev->ctrl);
+	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
-	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);
 	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
-	kref_put(&dev->kref, nvme_free_dev);
+	nvme_put_ctrl(&dev->ctrl);
 }

-/* These functions are yet to be implemented */
-#define nvme_error_detected NULL
-#define nvme_dump_registers NULL
-#define nvme_link_reset NULL
-#define nvme_slot_reset NULL
-#define nvme_error_resume NULL
-
 #ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);

-	nvme_dev_shutdown(ndev);
+	nvme_dev_disable(ndev, true);
 	return 0;
 }

@@ -3424,17 +2143,53 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);

-	schedule_work(&ndev->probe_work);
+	queue_work(nvme_workq, &ndev->reset_work);
 	return 0;
 }
 #endif

 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

+static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+	/*
+	 * A frozen channel requires a reset. When detected, this method will
+	 * shut down the controller to quiesce it. The controller will be
+	 * restarted after the slot reset through the driver's slot_reset
+	 * callback.
+	 */
+	dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+	switch (state) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		nvme_dev_disable(dev, false);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+{
+	struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "restart after slot reset\n");
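+	/* Restore saved PCI config space, then re-init the controller via reset work. */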
+	pci_restore_state(pdev);
+	queue_work(nvme_workq, &dev->reset_work);
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void nvme_error_resume(struct pci_dev *pdev)
+{
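+	/* Clear the AER uncorrectable error status so further errors are reported. */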
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
 static const struct pci_error_handlers nvme_err_handler = {
 	.error_detected	= nvme_error_detected,
-	.mmio_enabled	= nvme_dump_registers,
-	.link_reset	= nvme_link_reset,
 	.slot_reset	= nvme_slot_reset,
 	.resume		= nvme_error_resume,
 	.reset_notify	= nvme_reset_notify,
@@ -3444,6 +2199,10 @@ static const struct pci_error_handlers nvme_err_handler = {

 #define PCI_CLASS_STORAGE_EXPRESS	0x010802

 static const struct pci_device_id nvme_id_table[] = {
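+	/* device reports a preferred stripe size; split I/O on that boundary */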
+	{ PCI_VDEVICE(INTEL, 0x0953),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
+	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
+		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ 0, }
@@ -3468,40 +2227,21 @@ static int __init nvme_init(void)

 	init_waitqueue_head(&nvme_kthread_wait);

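+	/* WQ_MEM_RECLAIM: controller resets must make progress under memory pressure */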
-	nvme_workq = create_singlethread_workqueue("nvme");
+	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 	if (!nvme_workq)
 		return -ENOMEM;

-	result = register_blkdev(nvme_major, "nvme");
+	result = nvme_core_init();
 	if (result < 0)
 		goto kill_workq;
-	else if (result > 0)
-		nvme_major = result;
-
-	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
-							&nvme_dev_fops);
-	if (result < 0)
-		goto unregister_blkdev;
-	else if (result > 0)
-		nvme_char_major = result;
-
-	nvme_class = class_create(THIS_MODULE, "nvme");
-	if (IS_ERR(nvme_class)) {
-		result = PTR_ERR(nvme_class);
-		goto unregister_chrdev;
-	}

 	result = pci_register_driver(&nvme_driver);
 	if (result)
-		goto destroy_class;
+		goto core_exit;
 	return 0;

- destroy_class:
-	class_destroy(nvme_class);
- unregister_chrdev:
-	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev:
-	unregister_blkdev(nvme_major, "nvme");
+ core_exit:
+	nvme_core_exit();
  kill_workq:
 	destroy_workqueue(nvme_workq);
 	return result;
@@ -3510,10 +2250,8 @@ static int __init nvme_init(void)

 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
-	unregister_blkdev(nvme_major, "nvme");
+	nvme_core_exit();
 	destroy_workqueue(nvme_workq);
-	class_destroy(nvme_class);
-	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
 }