|
@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl {
|
|
|
__le16 cdw14[6];
|
|
|
};
|
|
|
|
|
|
-struct nvme_nvm_bbtbl {
|
|
|
+struct nvme_nvm_getbbtbl {
|
|
|
__u8 opcode;
|
|
|
__u8 flags;
|
|
|
__u16 command_id;
|
|
@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl {
|
|
|
__u64 rsvd[2];
|
|
|
__le64 prp1;
|
|
|
__le64 prp2;
|
|
|
- __le32 prp1_len;
|
|
|
- __le32 prp2_len;
|
|
|
- __le32 lbb;
|
|
|
- __u32 rsvd11[3];
|
|
|
+ __le64 spba;
|
|
|
+ __u32 rsvd4[4];
|
|
|
+};
|
|
|
+
|
|
|
+struct nvme_nvm_setbbtbl {
|
|
|
+ __u8 opcode;
|
|
|
+ __u8 flags;
|
|
|
+ __u16 command_id;
|
|
|
+ __le32 nsid;
|
|
|
+ __le64 rsvd[2];
|
|
|
+ __le64 prp1;
|
|
|
+ __le64 prp2;
|
|
|
+ __le64 spba;
|
|
|
+ __le16 nlb;
|
|
|
+ __u8 value;
|
|
|
+ __u8 rsvd3;
|
|
|
+ __u32 rsvd4[3];
|
|
|
};
|
|
|
|
|
|
struct nvme_nvm_erase_blk {
|
|
@@ -129,8 +142,8 @@ struct nvme_nvm_command {
|
|
|
struct nvme_nvm_hb_rw hb_rw;
|
|
|
struct nvme_nvm_ph_rw ph_rw;
|
|
|
struct nvme_nvm_l2ptbl l2p;
|
|
|
- struct nvme_nvm_bbtbl get_bb;
|
|
|
- struct nvme_nvm_bbtbl set_bb;
|
|
|
+ struct nvme_nvm_getbbtbl get_bb;
|
|
|
+ struct nvme_nvm_setbbtbl set_bb;
|
|
|
struct nvme_nvm_erase_blk erase;
|
|
|
};
|
|
|
};
|
|
@@ -187,6 +200,20 @@ struct nvme_nvm_id {
|
|
|
struct nvme_nvm_id_group groups[4];
|
|
|
} __packed;
|
|
|
|
|
|
+struct nvme_nvm_bb_tbl {
|
|
|
+ __u8 tblid[4];
|
|
|
+ __le16 verid;
|
|
|
+ __le16 revid;
|
|
|
+ __le32 rvsd1;
|
|
|
+ __le32 tblks;
|
|
|
+ __le32 tfact;
|
|
|
+ __le32 tgrown;
|
|
|
+ __le32 tdresv;
|
|
|
+ __le32 thresv;
|
|
|
+ __le32 rsvd2[8];
|
|
|
+ __u8 blk[0];
|
|
|
+};
|
|
|
+
|
|
|
/*
|
|
|
* Check we didn't inadvertently grow the command struct
|
|
|
*/
|
|
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
|
|
|
- BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
|
|
|
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
|
|
|
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
|
|
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
|
|
|
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
|
|
|
}
|
|
|
|
|
|
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
|
|
@@ -322,43 +351,80 @@ out:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
|
|
|
- unsigned int nr_blocks,
|
|
|
- nvm_bb_update_fn *update_bbtbl, void *priv)
|
|
|
+static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
|
|
|
+ int nr_blocks, nvm_bb_update_fn *update_bbtbl,
|
|
|
+ void *priv)
|
|
|
{
|
|
|
struct nvme_ns *ns = q->queuedata;
|
|
|
struct nvme_dev *dev = ns->dev;
|
|
|
struct nvme_nvm_command c = {};
|
|
|
- void *bb_bitmap;
|
|
|
- u16 bb_bitmap_size;
|
|
|
+ struct nvme_nvm_bb_tbl *bb_tbl;
|
|
|
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
|
|
|
int ret = 0;
|
|
|
|
|
|
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
|
|
|
c.get_bb.nsid = cpu_to_le32(ns->ns_id);
|
|
|
- c.get_bb.lbb = cpu_to_le32(lunid);
|
|
|
- bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
|
|
|
- bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
|
|
|
- if (!bb_bitmap)
|
|
|
- return -ENOMEM;
|
|
|
+ c.get_bb.spba = cpu_to_le64(ppa.ppa);
|
|
|
|
|
|
- bitmap_zero(bb_bitmap, nr_blocks);
|
|
|
+ bb_tbl = kzalloc(tblsz, GFP_KERNEL);
|
|
|
+ if (!bb_tbl)
|
|
|
+ return -ENOMEM;
|
|
|
|
|
|
- ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
|
|
|
- bb_bitmap_size);
|
|
|
+ ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_tbl, tblsz);
|
|
|
if (ret) {
|
|
|
dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
|
|
|
ret = -EIO;
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
- ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
|
|
|
+ if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
|
|
|
+ bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
|
|
|
+ dev_err(dev->dev, "bbt format mismatch\n");
|
|
|
+ ret = -EINVAL;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (le16_to_cpu(bb_tbl->verid) != 1) {
|
|
|
+ ret = -EINVAL;
|
|
|
+ dev_err(dev->dev, "bbt version not supported\n");
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
|
|
|
+ ret = -EINVAL;
|
|
|
+ dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)\n",
|
|
|
+ le32_to_cpu(bb_tbl->tblks), nr_blocks);
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
|
|
|
if (ret) {
|
|
|
ret = -EINTR;
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
out:
|
|
|
- kfree(bb_bitmap);
|
|
|
+ kfree(bb_tbl);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
|
|
|
+ int type)
|
|
|
+{
|
|
|
+ struct nvme_ns *ns = q->queuedata;
|
|
|
+ struct nvme_dev *dev = ns->dev;
|
|
|
+ struct nvme_nvm_command c = {};
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
|
|
|
+ c.set_bb.nsid = cpu_to_le32(ns->ns_id);
|
|
|
+ c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
|
|
|
+ c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
|
|
|
+ c.set_bb.value = type;
|
|
|
+
|
|
|
+ ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
|
|
|
+ if (ret)
|
|
|
+ dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
@@ -474,6 +540,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
|
|
|
.get_l2p_tbl = nvme_nvm_get_l2p_tbl,
|
|
|
|
|
|
.get_bb_tbl = nvme_nvm_get_bb_tbl,
|
|
|
+ .set_bb_tbl = nvme_nvm_set_bb_tbl,
|
|
|
|
|
|
.submit_io = nvme_nvm_submit_io,
|
|
|
.erase_block = nvme_nvm_erase_block,
|