rxe_qp.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}
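
/*
 * Note: rxe_qp_chk_cap() is called both at create time (from
 * rxe_qp_chk_init() below) and on modify (from rxe_qp_chk_attr() when
 * IB_QP_CAP is set). For example, assuming device limits of
 * max_qp_wr = 0x4000 and max_sge = 32 (the actual values come from the
 * rxe device attributes), a cap of { .max_send_wr = 16,
 * .max_send_sge = 4 } passes, while { .max_send_sge = 64 } is rejected
 * with -EINVAL.
 */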

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}
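
/*
 * The responder keeps one resp_res slot per outstanding inbound RDMA
 * READ or atomic operation, used as a ring via res_head/res_tail, so
 * max_dest_rd_atomic bounds the array size (see the caller in
 * rxe_qp_from_attr()). A READ slot may pin an MR reference and an
 * atomic slot holds a cached reply skb plus a qp reference, which is
 * why freeing a slot drops those references first.
 */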

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);
	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
	setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	return 0;
}
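
/*
 * A send WQE must be able to hold either the scatter/gather list or
 * the inline payload, whichever is larger, hence the max_t() above.
 * As a worked example, assuming sizeof(struct rxe_send_wqe) == 192
 * and sizeof(struct ib_sge) == 16 (both sizes vary by build),
 * max_send_sge = 4 and max_inline_data = 128 give
 * wqe_size = max(192 + 4 * 16, 192 + 128) = max(256, 320) = 320 bytes.
 */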

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}
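
/*
 * Error unwinding mirrors construction: if the responder side fails to
 * initialize, the already-built send queue is torn down first (err2),
 * then the PD, CQ and SRQ references taken above are dropped (err1),
 * leaving the pool objects exactly as they were found.
 */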

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
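
/*
 * ib_modify_qp_is_ok() enforces the IBA QP state machine: the
 * cur_state -> new_state transition must be legal, and the attribute
 * mask must carry all the attributes that transition requires and no
 * attributes it forbids. For an RC QP, for example, INIT -> RTR
 * requires IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN, IB_QP_RQ_PSN,
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER.
 */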

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves drain work and packet queues
	 * etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);
		rxe_enable_task(&qp->req.task);
	}
}
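
/*
 * The pattern above (disable the tasks, flip the state, run each task
 * once synchronously with __rxe_do_task(), then re-enable) lets the
 * requester, completer and responder state machines observe
 * QP_STATE_RESET and flush their own work and packet queues without
 * racing against their normal tasklet execution.
 */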

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}
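
/*
 * Entering SQD here only marks the send side as draining; the
 * requester/completer state machines (in rxe_req.c and rxe_comp.c)
 * are expected to finish the outstanding send WQEs and move the QP to
 * QP_STATE_DRAINED. Only RC QPs have a completer that runs
 * asynchronously, so for other QP types the completer work is done
 * inline.
 */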

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
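
/*
 * Two worked examples for the conversions above:
 *
 *	- max_rd_atomic: __roundup_pow_of_two(5) == 8, so requesting 5
 *	  outstanding RDMA READ/atomic operations silently grants 8.
 *
 *	- timeout: the local ACK timeout is 4.096 us * 2^attr->timeout,
 *	  encoded as 4096 ns << timeout. For attr->timeout = 14 this is
 *	  4096 ns * 16384 = ~67.1 ms, which nsecs_to_jiffies() converts
 *	  to about 16 jiffies at HZ=250 (HZ is configuration dependent).
 */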

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	del_timer_sync(&qp->retrans_timer);
	del_timer_sync(&qp->rnr_nak_timer);

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}
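
/*
 * For reference, a minimal userspace sequence that reaches these entry
 * points through libibverbs (illustrative sketch only; error handling
 * and the modify attributes are omitted):
 *
 *	struct ibv_qp_init_attr init = {
 *		.send_cq = cq, .recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IBV_QPT_RC,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &init);	/* -> rxe_qp_from_init() */
 *	ibv_modify_qp(qp, &attr, mask);			/* -> rxe_qp_chk_attr() +
 *							 *    rxe_qp_from_attr() */
 *	ibv_destroy_qp(qp);				/* -> rxe_qp_destroy() */
 */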