rdma.c

/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

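/*
 * Per-command response context: the SEND SGE/WR used to return the NVMe
 * completion, the rdma_rw context used for RDMA READ/WRITE data transfers,
 * and the embedded nvmet request.
 */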
struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *srq_cmds;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}

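/*
 * Allocate a page-backed scatterlist large enough to hold @length bytes.
 * Note that this returns an NVMe status code (NVME_SC_INTERNAL) on failure
 * rather than a negative errno, as callers propagate it straight into the
 * command completion.
 */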
static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

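/*
 * Post an RDMA RECV for the given command buffer, either on the device-wide
 * SRQ (if one was created) or on the queue's own QP.
 */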
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

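/*
 * Retry commands that were deferred because the send queue had no free work
 * request slots. Stop at the first command that still cannot be executed and
 * put it back at the head of the wait list.
 */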
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * If this was an admin connect error we haven't set up the
		 * controller yet, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

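/*
 * ->queue_response() fabrics callback: post a fresh RECV for the next
 * command, then send the NVMe completion. For commands with Data-Out to the
 * host the completion SEND is chained behind the RDMA WRITE work requests
 * built by rdma_rw, and remote invalidation is requested when the host asked
 * for it.
 */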
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

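/*
 * Map a keyed (remote) SGL descriptor: allocate a local scatterlist for the
 * transfer and initialize an rdma_rw context against the host's address and
 * rkey. The number of work requests rdma_rw needs is accounted in n_rdma,
 * and the rkey is remembered for remote invalidation when requested.
 */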
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->req.transfer_len += len;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

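/*
 * Try to start a command: reserve 1 + n_rdma send queue slots (completion
 * SEND plus any RDMA READ/WRITE work requests). Returns false if the send
 * queue is currently full, in which case the caller parks the command on
 * rsp_wr_wait_list and it is retried from nvmet_rdma_release_rsp().
 */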
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

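/*
 * RECV completion handler: pair the received command capsule with a free
 * response context and dispatch it. While the queue is still connecting,
 * commands are held on rsp_wait_list and replayed once the CM ESTABLISHED
 * event moves the queue to LIVE.
 */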
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */
	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

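/*
 * Look up (or create) the per-IB-device state behind this cm_id. Devices are
 * matched by node GUID, kept on a global list, and reference counted so that
 * queues sharing a device share its PD and optional SRQ.
 */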
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

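/*
 * Handle an RDMA CM CONNECT_REQUEST: find or create the device state,
 * allocate the queue and its resources, and accept the connection. Admin
 * queue connects first flush scheduled work so that any controller teardown
 * still in flight has finished.
 */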
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

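/*
 * RDMA CM event handler shared by the listener cm_id (port context) and the
 * per-queue cm_ids (qp_context). Returning a non-zero value from this
 * callback tells the CM core to destroy the cm_id itself.
 */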
static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;

	/* Device is being removed, delete all queues using this device */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ib_unregister_client(&nvmet_rdma_ib_client);
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */