  1. /*
  2. * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. */
  13. #ifndef _NVMET_H
  14. #define _NVMET_H
  15. #include <linux/dma-mapping.h>
  16. #include <linux/types.h>
  17. #include <linux/device.h>
  18. #include <linux/kref.h>
  19. #include <linux/percpu-refcount.h>
  20. #include <linux/list.h>
  21. #include <linux/mutex.h>
  22. #include <linux/uuid.h>
  23. #include <linux/nvme.h>
  24. #include <linux/configfs.h>
  25. #include <linux/rcupdate.h>
  26. #include <linux/blkdev.h>
/* Number of Async Event Request commands a controller keeps outstanding. */
#define NVMET_ASYNC_EVENTS		4
/* Number of entries in the Error Information log page. */
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
/*
 * Per-namespace state.  A namespace is backed by a block device and belongs
 * to exactly one subsystem; it is only visible to hosts while enabled.
 */
struct nvmet_ns {
	struct list_head	dev_link;	/* entry on the subsystem's namespace list */
	struct percpu_ref	ref;		/* per-I/O reference, drained on disable */
	struct block_device	*bdev;		/* backing block device */
	u32			nsid;		/* namespace identifier (NSID) */
	u32			blksize_shift;	/* log2 of the logical block size */
	loff_t			size;		/* capacity in bytes */
	u8			nguid[16];	/* Namespace Globally Unique Identifier */
	uuid_t			uuid;		/* namespace UUID */

	bool			enabled;	/* exported to hosts? */
	struct nvmet_subsys	*subsys;	/* owning subsystem */
	const char		*device_path;	/* path of the backing device */

	struct config_group	device_group;
	struct config_group	group;		/* configfs directory for this namespace */

	struct completion	disable_done;	/* presumably signalled once ref is drained — confirm */
};
  53. static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
  54. {
  55. return container_of(to_config_group(item), struct nvmet_ns, group);
  56. }
/* Target-side completion queue. */
struct nvmet_cq {
	u16			qid;	/* queue identifier (0 is the admin queue) */
	u16			size;	/* number of entries */
};
/* Target-side submission queue. */
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;		/* owning controller */
	struct percpu_ref	ref;		/* in-flight request reference */
	u16			qid;		/* queue identifier (0 is the admin queue) */
	u16			size;		/* number of entries */
	/* NOTE(review): the two completions look like they pair with the
	 * percpu_ref kill/confirm/release callbacks in sq teardown — confirm. */
	struct completion	free_done;
	struct completion	confirm_done;
};
/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		List head for holding a list of these elements.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @subsys_group:	ConfigFS group holding the subsystem links.
 * @subsystems:		List of subsystems (struct nvmet_subsys_link)
 *				exported through this port.
 * @referrals_group:	ConfigFS group holding the referral links.
 * @referrals:		List of referral ports reported via discovery.
 * @priv:		Private data for the transport.
 * @enabled:		True while a transport has this port enabled.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	void				*priv;
	bool				enabled;
};
  89. static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
  90. {
  91. return container_of(to_config_group(item), struct nvmet_port,
  92. group);
  93. }
/*
 * Per-controller state, one instance per host connection to a subsystem.
 */
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;	/* subsystem this controller belongs to */
	struct nvmet_cq		**cqs;		/* completion queue pointers */
	struct nvmet_sq		**sqs;		/* submission queue pointers */

	struct mutex		lock;
	u64			cap;		/* Controller Capabilities (CAP) register */
	u64			serial;
	u32			cc;		/* Controller Configuration (CC) register */
	u32			csts;		/* Controller Status (CSTS) register */

	u16			cntlid;		/* controller identifier */
	u32			kato;		/* keep-alive timeout */

	/* slots for outstanding Async Event Request commands */
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;	/* queued struct nvmet_async_event entries */
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;	/* entry on the subsystem's ctrls list */
	struct kref		ref;
	struct delayed_work	ka_work;	/* keep-alive timer work */
	struct work_struct	fatal_err_work;

	struct nvmet_fabrics_ops *ops;		/* transport ops for this controller */

	char			subsysnqn[NVMF_NQN_FIELD_LEN];	/* NQN of the subsystem */
	char			hostnqn[NVMF_NQN_FIELD_LEN];	/* NQN of the connected host */
};
/*
 * An NVMe subsystem exposed by the target, configured through configfs.
 */
struct nvmet_subsys {
	enum nvme_subsys_type	type;		/* regular NVMe vs. discovery subsystem */

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;	/* namespaces (struct nvmet_ns) in this subsystem */
	unsigned int		max_nsid;	/* highest NSID currently in use */

	struct list_head	ctrls;		/* active controllers */
	struct list_head	hosts;		/* allowed hosts (struct nvmet_host_link) */
	bool			allow_any_host;	/* skip the host allow-list check */

	u16			max_qid;	/* highest valid queue id */

	u64			ver;		/* NVMe specification version advertised */
	char			*subsysnqn;	/* subsystem NQN */

	struct config_group	group;		/* configfs directory for this subsystem */
	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};
  133. static inline struct nvmet_subsys *to_subsys(struct config_item *item)
  134. {
  135. return container_of(to_config_group(item), struct nvmet_subsys, group);
  136. }
  137. static inline struct nvmet_subsys *namespaces_to_subsys(
  138. struct config_item *item)
  139. {
  140. return container_of(to_config_group(item), struct nvmet_subsys,
  141. namespaces_group);
  142. }
/*
 * A host entry, represented purely by its configfs directory; the host's
 * NQN is the directory name (see nvmet_host_name()).
 */
struct nvmet_host {
	struct config_group	group;
};
  146. static inline struct nvmet_host *to_host(struct config_item *item)
  147. {
  148. return container_of(to_config_group(item), struct nvmet_host, group);
  149. }
  150. static inline char *nvmet_host_name(struct nvmet_host *host)
  151. {
  152. return config_item_name(&host->group.cg_item);
  153. }
/* Link entry tying an allowed host to a subsystem's hosts list. */
struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

/* Link entry tying a subsystem to a port's subsystems list. */
struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};
struct nvmet_req;

/*
 * Operations a fabrics transport driver registers with the target core
 * via nvmet_register_transport().
 */
struct nvmet_fabrics_ops {
	struct module		*owner;
	unsigned int		type;		/* transport type — presumably an NVMF_TRTYPE_* value, confirm */
	unsigned int		sqe_inline_size; /* bytes of data carried inline with the SQE */
	unsigned int		msdbd;		/* Maximum SGL Data Block Descriptors */
	bool			has_keyed_sgls : 1;	/* transport supports keyed SGLs */
	void (*queue_response)(struct nvmet_req *req);	/* deliver a completion to the host */
	int (*add_port)(struct nvmet_port *port);	/* start listening on a port */
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);	/* tear down a controller's queues */
};
/* Size of the preallocated biovec array embedded in each request. */
#define NVMET_MAX_INLINE_BIOVEC	8

/*
 * Per-command request state, shared between the core and the transports.
 */
struct nvmet_req {
	struct nvme_command	*cmd;		/* submitted command (SQE) */
	struct nvme_completion	*rsp;		/* completion to return (CQE) */
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;		/* target namespace, if the command has one */
	struct scatterlist	*sg;		/* data buffer scatterlist */
	/* embedded bio + biovec avoid allocations for small block I/O */
	struct bio		inline_bio;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	int			sg_cnt;		/* number of sg entries */
	size_t			data_len;	/* expected transfer length in bytes */
	struct nvmet_port	*port;		/* port the command arrived on */
	void (*execute)(struct nvmet_req *req);	/* command handler */
	struct nvmet_fabrics_ops *ops;		/* transport that owns this request */
};
  190. static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
  191. {
  192. req->rsp->status = cpu_to_le16(status << 1);
  193. }
  194. static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
  195. {
  196. req->rsp->result.u32 = cpu_to_le32(result);
  197. }
  198. /*
  199. * NVMe command writes actually are DMA reads for us on the target side.
  200. */
  201. static inline enum dma_data_direction
  202. nvmet_data_dir(struct nvmet_req *req)
  203. {
  204. return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  205. }
/*
 * A queued asynchronous event, waiting for an AER command slot; the three
 * fields mirror the AEN fields of the completion's result dword.
 */
struct nvmet_async_event {
	struct list_head	entry;		/* entry on ctrl->async_events */
	u8			event_type;
	u8			event_info;
	u8			log_page;	/* associated log page identifier */
};
/*
 * Command parsers; each returns an NVMe status code (0 on success).
 */
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

/* Request lifecycle. */
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);

/* Queue setup and teardown. */
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

/* Handle a host write to the CC register. */
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);

/* Controller lifecycle. */
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

/* Subsystem lifecycle. */
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

/* Namespace lookup and lifecycle. */
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

/* Transport registration, port and referral enable/disable. */
int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

/* Copy between a kernel buffer and the request's scatterlist. */
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);

/* Decode the transfer length of a Get Log Page command. */
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
/* Entries per queue. */
#define NVMET_QUEUE_SIZE	1024
/* Maximum number of I/O queues — presumably per controller; confirm. */
#define NVMET_NR_QUEUES		64
/* Maximum outstanding commands, bounded by the queue size. */
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
/* Keep Alive Support value advertised to hosts (100 ms units per spec). */
#define NVMET_KAS		10
/* Keep-alive timeout used for discovery controllers, in seconds. */
#define NVMET_DISC_KATO		120
/* Module init/exit helpers for the configfs interface and discovery. */
int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);
int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

/* The special discovery subsystem instance. */
extern struct nvmet_subsys *nvmet_disc_subsys;
/* Discovery log generation counter (GENCTR). */
extern u64 nvmet_genctr;
/* Serializes changes to the target configuration. */
extern struct rw_semaphore nvmet_config_sem;

/* Check whether hostnqn may connect to the given subsystem. */
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);
  270. #endif /* _NVMET_H */