nvmet.h

/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
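
/*
 * Illustrative sketch (not part of the original header): when a Connect
 * command carries an invalid field, a handler can report the offending
 * offset in the completion result with these macros, along the lines of:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *
 * The field name (cntlid) is only an example here.
 */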
struct nvmet_ns {
	struct list_head dev_link;
	struct percpu_ref ref;
	struct block_device *bdev;
	u32 nsid;
	u32 blksize_shift;
	loff_t size;
	u8 nguid[16];
	uuid_t uuid;

	bool enabled;
	struct nvmet_subsys *subsys;
	const char *device_path;

	struct config_group device_group;
	struct config_group group;

	struct completion disable_done;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

struct nvmet_cq {
	u16 qid;
	u16 size;
};

struct nvmet_sq {
	struct nvmet_ctrl *ctrl;
	struct percpu_ref ref;
	u16 qid;
	u16 size;
	u16 sqhd;
	struct completion free_done;
	struct completion confirm_done;
};

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		List head for holding a list of these elements.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group group;
	struct config_group subsys_group;
	struct list_head subsystems;
	struct config_group referrals_group;
	struct list_head referrals;
	void *priv;
	bool enabled;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

struct nvmet_ctrl {
	struct nvmet_subsys *subsys;
	struct nvmet_cq **cqs;
	struct nvmet_sq **sqs;

	struct mutex lock;
	u64 cap;
	u32 cc;
	u32 csts;

	uuid_t hostid;
	u16 cntlid;
	u32 kato;

	struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int nr_async_event_cmds;
	struct list_head async_events;
	struct work_struct async_event_work;

	struct list_head subsys_entry;
	struct kref ref;
	struct delayed_work ka_work;
	struct work_struct fatal_err_work;

	struct nvmet_fabrics_ops *ops;

	char subsysnqn[NVMF_NQN_FIELD_LEN];
	char hostnqn[NVMF_NQN_FIELD_LEN];
};

struct nvmet_subsys {
	enum nvme_subsys_type type;

	struct mutex lock;
	struct kref ref;

	struct list_head namespaces;
	unsigned int max_nsid;

	struct list_head ctrls;

	struct list_head hosts;
	bool allow_any_host;

	u16 max_qid;

	u64 ver;
	u64 serial;
	char *subsysnqn;

	struct config_group group;

	struct config_group namespaces_group;
	struct config_group allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head entry;
	struct nvmet_host *host;
};

struct nvmet_subsys_link {
	struct list_head entry;
	struct nvmet_subsys *subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int sqe_inline_size;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
};
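
/*
 * Illustrative sketch (not part of the original header): a fabrics transport
 * driver typically fills in a struct nvmet_fabrics_ops and hands it to
 * nvmet_register_transport() declared below. The nvmet_foo_* names are
 * hypothetical placeholders for the driver's own callbacks.
 *
 *	static struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_RDMA,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 */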
#define NVMET_MAX_INLINE_BIOVEC	8

struct nvmet_req {
	struct nvme_command *cmd;
	struct nvme_completion *rsp;
	struct nvmet_sq *sq;
	struct nvmet_cq *cq;
	struct nvmet_ns *ns;
	struct scatterlist *sg;
	struct bio inline_bio;
	struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	int sg_cnt;
	size_t data_len;

	struct nvmet_port *port;

	void (*execute)(struct nvmet_req *req);
	struct nvmet_fabrics_ops *ops;
};

static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
	req->rsp->status = cpu_to_le16(status << 1);
}

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
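
/*
 * Added note for clarity (not in the original header): for a host Write
 * command the payload flows from the host into target memory, so the
 * transport device deposits data into the target's buffers and the mapping
 * is DMA_FROM_DEVICE; for a host Read the payload is pulled out of target
 * memory, hence DMA_TO_DEVICE.
 */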
struct nvmet_async_event {
	struct list_head entry;
	u8 event_type;
	u8 event_info;
	u8 log_page;
};

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		64
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
#define NVMET_KAS		10
#define NVMET_DISC_KATO		120

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);
int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

#endif /* _NVMET_H */