// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv respectively
 * are submitted to either RC SQ or RC RQ respectively
 * (reliably connected send/receive queue)
 * and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"
#define SMC_WR_MAX_POLL_CQE 10  /* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4

static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);
struct smc_wr_tx_pend {         /* control data for a pending send request */
        u64                     wr_id;          /* work request id sent */
        smc_wr_tx_handler       handler;
        enum ib_wc_status       wc_status;      /* CQE status */
        struct smc_link         *link;
        u32                     idx;
        struct smc_wr_tx_pend_priv priv;
};
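
/* Typical send-side usage (a sketch, assuming a caller like the LLC or CDC
 * layer mentioned above; not a definitive recipe):
 *
 *      struct smc_wr_tx_pend_priv *pend;
 *      struct smc_wr_buf *wr_buf;
 *      int rc;
 *
 *      rc = smc_wr_tx_get_free_slot(link, handler, &wr_buf, &pend);
 *      if (rc)
 *              return rc;
 *      (assemble the message in *wr_buf)
 *      rc = smc_wr_tx_send(link, pend);
 *
 * If message assembly fails, the slot is returned via smc_wr_tx_put_slot()
 * instead of being sent; the completion handler runs later in tasklet
 * context once the send CQE arrives.
 */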
/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/
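
/* look up the pending send matching this work request id; returns wr_tx_cnt
 * (an invalid index) if no pending send matches
 */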
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
        u32 i;

        for (i = 0; i < link->wr_tx_cnt; i++) {
                if (link->wr_tx_pends[i].wr_id == wr_id)
                        return i;
        }
        return link->wr_tx_cnt;
}
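
/* Handle one send completion: wake up a waiter for a completed memory
 * registration (IB_WC_REG_MR), or release the matching pending-send slot and
 * invoke its completion handler. An error status additionally flushes all
 * pending sends and terminates the link group.
 */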
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
        struct smc_wr_tx_pend pnd_snd;
        struct smc_link *link;
        u32 pnd_snd_idx;
        int i;

        link = wc->qp->qp_context;
        if (wc->opcode == IB_WC_REG_MR) {
                if (wc->status)
                        link->wr_reg_state = FAILED;
                else
                        link->wr_reg_state = CONFIRMED;
                wake_up(&link->wr_reg_wait);
                return;
        }

        pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
        if (pnd_snd_idx == link->wr_tx_cnt)
                return;
        link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
        memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
        /* clear the full struct smc_wr_tx_pend including .priv */
        memset(&link->wr_tx_pends[pnd_snd_idx], 0,
               sizeof(link->wr_tx_pends[pnd_snd_idx]));
        memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
               sizeof(link->wr_tx_bufs[pnd_snd_idx]));
        if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
                return;
        if (wc->status) {
                struct smc_link_group *lgr;

                for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
                        /* clear full struct smc_wr_tx_pend including .priv */
                        memset(&link->wr_tx_pends[i], 0,
                               sizeof(link->wr_tx_pends[i]));
                        memset(&link->wr_tx_bufs[i], 0,
                               sizeof(link->wr_tx_bufs[i]));
                        clear_bit(i, link->wr_tx_mask);
                }
                /* terminate connections of this link group abnormally */
                lgr = container_of(link, struct smc_link_group,
                                   lnk[SMC_SINGLE_LINK]);
                smc_lgr_terminate(lgr);
        }
        if (pnd_snd.handler)
                pnd_snd.handler(&pnd_snd.priv, link, wc->status);
        wake_up(&link->wr_tx_wait);
}
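
/* bottom half for the send CQ: drain completions in batches of up to
 * SMC_WR_MAX_POLL_CQE, re-arm the CQ after the first pass and poll once more
 * to close the race between polling and re-arming
 */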
static void smc_wr_tx_tasklet_fn(unsigned long data)
{
        struct smc_ib_device *dev = (struct smc_ib_device *)data;
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
        int i = 0, rc;
        int polled = 0;

again:
        polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
                if (polled == 1) {
                        ib_req_notify_cq(dev->roce_cq_send,
                                         IB_CQ_NEXT_COMP |
                                         IB_CQ_REPORT_MISSED_EVENTS);
                }
                if (!rc)
                        break;
                for (i = 0; i < rc; i++)
                        smc_wr_tx_process_cqe(&wc[i]);
        } while (rc > 0);
        if (polled == 1)
                goto again;
}
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
        struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

        tasklet_schedule(&dev->send_tasklet);
}
/*---------------------------- request submission ---------------------------*/
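
/* reserve a free send slot by atomically setting its bit in wr_tx_mask;
 * returns -EBUSY if all send slots are currently in use
 */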
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
        *idx = link->wr_tx_cnt;
        for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                if (!test_and_set_bit(*idx, link->wr_tx_mask))
                        return 0;
        }
        *idx = link->wr_tx_cnt;
        return -EBUSY;
}
/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *                      and sets info for pending transmit tracking
 * @link:               Pointer to smc_link used to later send the message.
 * @handler:            Send completion handler function pointer.
 * @wr_buf:             Out value returns pointer to message buffer.
 * @wr_pend_priv:       Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
                            smc_wr_tx_handler handler,
                            struct smc_wr_buf **wr_buf,
                            struct smc_wr_tx_pend_priv **wr_pend_priv)
{
        struct smc_wr_tx_pend *wr_pend;
        u32 idx = link->wr_tx_cnt;
        struct ib_send_wr *wr_ib;
        u64 wr_id;
        int rc;

        *wr_buf = NULL;
        *wr_pend_priv = NULL;
        if (in_softirq()) {
                rc = smc_wr_tx_get_free_slot_index(link, &idx);
                if (rc)
                        return rc;
        } else {
                struct smc_link_group *lgr;

                lgr = container_of(link, struct smc_link_group,
                                   lnk[SMC_SINGLE_LINK]);
                rc = wait_event_timeout(
                        link->wr_tx_wait,
                        list_empty(&lgr->list) || /* lgr terminated */
                        (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
                        SMC_WR_TX_WAIT_FREE_SLOT_TIME);
                if (!rc) {
                        /* timeout - terminate connections */
                        smc_lgr_terminate(lgr);
                        return -EPIPE;
                }
                if (idx == link->wr_tx_cnt)
                        return -EPIPE;
        }
        wr_id = smc_wr_tx_get_next_wr_id(link);
        wr_pend = &link->wr_tx_pends[idx];
        wr_pend->wr_id = wr_id;
        wr_pend->handler = handler;
        wr_pend->link = link;
        wr_pend->idx = idx;
        wr_ib = &link->wr_tx_ibs[idx];
        wr_ib->wr_id = wr_id;
        *wr_buf = &link->wr_tx_bufs[idx];
        *wr_pend_priv = &wr_pend->priv;
        return 0;
}
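
/* return a send slot obtained via smc_wr_tx_get_free_slot() without posting
 * it; returns 1 if the slot was released, 0 otherwise
 */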
int smc_wr_tx_put_slot(struct smc_link *link,
                       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
        struct smc_wr_tx_pend *pend;

        pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
        if (pend->idx < link->wr_tx_cnt) {
                /* clear the full struct smc_wr_tx_pend including .priv */
                memset(&link->wr_tx_pends[pend->idx], 0,
                       sizeof(link->wr_tx_pends[pend->idx]));
                memset(&link->wr_tx_bufs[pend->idx], 0,
                       sizeof(link->wr_tx_bufs[pend->idx]));
                test_and_clear_bit(pend->idx, link->wr_tx_mask);
                return 1;
        }
        return 0;
}
/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
        struct ib_send_wr *failed_wr = NULL;
        struct smc_wr_tx_pend *pend;
        int rc;

        ib_req_notify_cq(link->smcibdev->roce_cq_send,
                         IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        pend = container_of(priv, struct smc_wr_tx_pend, priv);
        rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
                          &failed_wr);
        if (rc) {
                struct smc_link_group *lgr =
                        container_of(link, struct smc_link_group,
                                     lnk[SMC_SINGLE_LINK]);

                smc_wr_tx_put_slot(link, priv);
                smc_lgr_terminate(lgr);
        }
        return rc;
}
/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
        struct ib_send_wr *failed_wr = NULL;
        int rc;

        ib_req_notify_cq(link->smcibdev->roce_cq_send,
                         IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        link->wr_reg_state = POSTED;
        link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
        link->wr_reg.mr = mr;
        link->wr_reg.key = mr->rkey;
        failed_wr = &link->wr_reg.wr;
        rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, &failed_wr);
        WARN_ON(failed_wr != &link->wr_reg.wr);
        if (rc)
                return rc;

        rc = wait_event_interruptible_timeout(link->wr_reg_wait,
                                              (link->wr_reg_state != POSTED),
                                              SMC_WR_REG_MR_WAIT_TIME);
        if (!rc) {
                /* timeout - terminate connections */
                struct smc_link_group *lgr;

                lgr = container_of(link, struct smc_link_group,
                                   lnk[SMC_SINGLE_LINK]);
                smc_lgr_terminate(lgr);
                return -EPIPE;
        }
        if (rc == -ERESTARTSYS)
                return -EINTR;
        switch (link->wr_reg_state) {
        case CONFIRMED:
                rc = 0;
                break;
        case FAILED:
                rc = -EIO;
                break;
        case POSTED:
                rc = -EPIPE;
                break;
        }
        return rc;
}
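
/* Cancel pending send requests of the given message type: run the dismisser
 * on every pending send that the filter accepts.
 */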
void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
                             smc_wr_tx_filter filter,
                             smc_wr_tx_dismisser dismisser,
                             unsigned long data)
{
        struct smc_wr_tx_pend_priv *tx_pend;
        struct smc_wr_rx_hdr *wr_tx;
        int i;

        for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
                wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
                if (wr_tx->type != wr_tx_hdr_type)
                        continue;
                tx_pend = &link->wr_tx_pends[i].priv;
                if (filter(tx_pend, data))
                        dismisser(tx_pend);
        }
}
/****************************** receive queue ********************************/
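
/* register a handler for a message type; only one handler per type is
 * allowed, a second registration fails with -EEXIST
 */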
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
        struct smc_wr_rx_handler *h_iter;
        int rc = 0;

        spin_lock(&smc_wr_rx_hash_lock);
        hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
                if (h_iter->type == handler->type) {
                        rc = -EEXIST;
                        goto out_unlock;
                }
        }
        hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
        spin_unlock(&smc_wr_rx_hash_lock);
        return rc;
}
/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
        struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
        struct smc_wr_rx_handler *handler;
        struct smc_wr_rx_hdr *wr_rx;
        u64 temp_wr_id;
        u32 index;

        if (wc->byte_len < sizeof(*wr_rx))
                return; /* short message */
        temp_wr_id = wc->wr_id;
        index = do_div(temp_wr_id, link->wr_rx_cnt);
        wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
        hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
                if (handler->type == wr_rx->type)
                        handler->handler(wc, wr_rx);
        }
}
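
/* process a batch of receive completions: on success, timestamp the link,
 * demultiplex the message to its handler and repost a receive WR; fatal
 * errors (retry exceeded, flush) terminate the link group, other errors
 * just refill the receive queue
 */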
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
        struct smc_link *link;
        int i;

        for (i = 0; i < num; i++) {
                link = wc[i].qp->qp_context;
                if (wc[i].status == IB_WC_SUCCESS) {
                        link->wr_rx_tstamp = jiffies;
                        smc_wr_rx_demultiplex(&wc[i]);
                        smc_wr_rx_post(link); /* refill WR RX */
                } else {
                        struct smc_link_group *lgr;

                        /* handle status errors */
                        switch (wc[i].status) {
                        case IB_WC_RETRY_EXC_ERR:
                        case IB_WC_RNR_RETRY_EXC_ERR:
                        case IB_WC_WR_FLUSH_ERR:
                                /* terminate connections of this link group
                                 * abnormally
                                 */
                                lgr = container_of(link,
                                                   struct smc_link_group,
                                                   lnk[SMC_SINGLE_LINK]);
                                smc_lgr_terminate(lgr);
                                break;
                        default:
                                smc_wr_rx_post(link); /* refill WR RX */
                                break;
                        }
                }
        }
}
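
/* bottom half for the receive CQ, mirroring smc_wr_tx_tasklet_fn():
 * poll, re-arm the CQ after the first pass, then poll once more
 */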
static void smc_wr_rx_tasklet_fn(unsigned long data)
{
        struct smc_ib_device *dev = (struct smc_ib_device *)data;
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
        int polled = 0;
        int rc;

again:
        polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
                if (polled == 1) {
                        ib_req_notify_cq(dev->roce_cq_recv,
                                         IB_CQ_SOLICITED_MASK
                                         | IB_CQ_REPORT_MISSED_EVENTS);
                }
                if (!rc)
                        break;
                smc_wr_rx_process_cqes(&wc[0], rc);
        } while (rc > 0);
        if (polled == 1)
                goto again;
}
void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
        struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

        tasklet_schedule(&dev->recv_tasklet);
}
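
/* post an initial receive work request for each receive buffer of the link */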
int smc_wr_rx_post_init(struct smc_link *link)
{
        u32 i;
        int rc = 0;

        for (i = 0; i < link->wr_rx_cnt; i++)
                rc = smc_wr_rx_post(link);
        return rc;
}
/***************************** init, exit, misc ******************************/

void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
        struct ib_qp_attr *attr = &lnk->qp_attr;
        struct ib_qp_init_attr init_attr;

        memset(attr, 0, sizeof(*attr));
        memset(&init_attr, 0, sizeof(init_attr));
        ib_query_qp(lnk->roce_qp, attr,
                    IB_QP_STATE |
                    IB_QP_CUR_STATE |
                    IB_QP_PKEY_INDEX |
                    IB_QP_PORT |
                    IB_QP_QKEY |
                    IB_QP_AV |
                    IB_QP_PATH_MTU |
                    IB_QP_TIMEOUT |
                    IB_QP_RETRY_CNT |
                    IB_QP_RNR_RETRY |
                    IB_QP_RQ_PSN |
                    IB_QP_ALT_PATH |
                    IB_QP_MIN_RNR_TIMER |
                    IB_QP_SQ_PSN |
                    IB_QP_PATH_MIG_STATE |
                    IB_QP_CAP |
                    IB_QP_DEST_QPN,
                    &init_attr);

        lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
                               lnk->qp_attr.cap.max_send_wr);
        lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
                               lnk->qp_attr.cap.max_recv_wr);
}
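
/* initialize the scatter/gather entries and work requests covering the
 * DMA-mapped send and receive buffers, and preset the memory registration WR
 */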
static void smc_wr_init_sge(struct smc_link *lnk)
{
        u32 i;

        for (i = 0; i < lnk->wr_tx_cnt; i++) {
                lnk->wr_tx_sges[i].addr =
                        lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
                lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
                lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
                lnk->wr_tx_ibs[i].next = NULL;
                lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
                lnk->wr_tx_ibs[i].num_sge = 1;
                lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
                lnk->wr_tx_ibs[i].send_flags =
                        IB_SEND_SIGNALED | IB_SEND_SOLICITED;
        }
        for (i = 0; i < lnk->wr_rx_cnt; i++) {
                lnk->wr_rx_sges[i].addr =
                        lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
                lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
                lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
                lnk->wr_rx_ibs[i].next = NULL;
                lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
                lnk->wr_rx_ibs[i].num_sge = 1;
        }
        lnk->wr_reg.wr.next = NULL;
        lnk->wr_reg.wr.num_sge = 0;
        lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
        lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
        lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}
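
/* release the IB resources of a link: clear the send-slot mask and unmap the
 * DMA-mapped send and receive buffers
 */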
void smc_wr_free_link(struct smc_link *lnk)
{
        struct ib_device *ibdev;

        memset(lnk->wr_tx_mask, 0,
               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));

        if (!lnk->smcibdev)
                return;
        ibdev = lnk->smcibdev->ibdev;

        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                                    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
                                    DMA_FROM_DEVICE);
                lnk->wr_rx_dma_addr = 0;
        }
        if (lnk->wr_tx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
                                    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
                                    DMA_TO_DEVICE);
                lnk->wr_tx_dma_addr = 0;
        }
}
void smc_wr_free_link_mem(struct smc_link *lnk)
{
        kfree(lnk->wr_tx_pends);
        lnk->wr_tx_pends = NULL;
        kfree(lnk->wr_tx_mask);
        lnk->wr_tx_mask = NULL;
        kfree(lnk->wr_tx_sges);
        lnk->wr_tx_sges = NULL;
        kfree(lnk->wr_rx_sges);
        lnk->wr_rx_sges = NULL;
        kfree(lnk->wr_rx_ibs);
        lnk->wr_rx_ibs = NULL;
        kfree(lnk->wr_tx_ibs);
        lnk->wr_tx_ibs = NULL;
        kfree(lnk->wr_tx_bufs);
        lnk->wr_tx_bufs = NULL;
        kfree(lnk->wr_rx_bufs);
        lnk->wr_rx_bufs = NULL;
}
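
/* allocate the arrays backing a link: message buffers, work requests,
 * scatter/gather entries, the send-slot bitmask and the pending-send array;
 * on failure, everything allocated so far is freed again
 */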
int smc_wr_alloc_link_mem(struct smc_link *link)
{
        /* allocate link related memory */
        link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
        if (!link->wr_tx_bufs)
                goto no_mem;
        link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
                                   GFP_KERNEL);
        if (!link->wr_rx_bufs)
                goto no_mem_wr_tx_bufs;
        link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
                                  GFP_KERNEL);
        if (!link->wr_tx_ibs)
                goto no_mem_wr_rx_bufs;
        link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
                                  sizeof(link->wr_rx_ibs[0]),
                                  GFP_KERNEL);
        if (!link->wr_rx_ibs)
                goto no_mem_wr_tx_ibs;
        link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
                                   GFP_KERNEL);
        if (!link->wr_tx_sges)
                goto no_mem_wr_rx_ibs;
        link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
                                   sizeof(link->wr_rx_sges[0]),
                                   GFP_KERNEL);
        if (!link->wr_rx_sges)
                goto no_mem_wr_tx_sges;
        link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
                                   sizeof(*link->wr_tx_mask),
                                   GFP_KERNEL);
        if (!link->wr_tx_mask)
                goto no_mem_wr_rx_sges;
        link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
                                    sizeof(link->wr_tx_pends[0]),
                                    GFP_KERNEL);
        if (!link->wr_tx_pends)
                goto no_mem_wr_tx_mask;
        return 0;

no_mem_wr_tx_mask:
        kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
        kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
        kfree(link->wr_tx_sges);
no_mem_wr_rx_ibs:
        kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
        kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
        kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
        kfree(link->wr_tx_bufs);
no_mem:
        return -ENOMEM;
}
void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
        tasklet_kill(&smcibdev->recv_tasklet);
        tasklet_kill(&smcibdev->send_tasklet);
}
void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
        tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
                     (unsigned long)smcibdev);
        tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
                     (unsigned long)smcibdev);
}
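
/* map the link's buffers for DMA and initialize its work request state;
 * on failure, any mapping established so far is undone
 */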
int smc_wr_create_link(struct smc_link *lnk)
{
        struct ib_device *ibdev = lnk->smcibdev->ibdev;
        int rc = 0;

        smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
        lnk->wr_rx_id = 0;
        lnk->wr_rx_dma_addr = ib_dma_map_single(
                ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
                DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
                lnk->wr_rx_dma_addr = 0;
                rc = -EIO;
                goto out;
        }
        lnk->wr_tx_dma_addr = ib_dma_map_single(
                ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
                DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
                rc = -EIO;
                goto dma_unmap;
        }
        smc_wr_init_sge(lnk);
        memset(lnk->wr_tx_mask, 0,
               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
        init_waitqueue_head(&lnk->wr_tx_wait);
        init_waitqueue_head(&lnk->wr_reg_wait);
        return rc;

dma_unmap:
        ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                            SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
                            DMA_FROM_DEVICE);
        lnk->wr_rx_dma_addr = 0;
out:
        return rc;
}