rxe_qp.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

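/* validate the requested qp capacities (send/recv work requests,
 * scatter/gather entries, inline data) against the device limits;
 * when the qp is attached to an SRQ the receive side limits are
 * owned by the SRQ and are not checked here
 */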
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

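/* validate the ib_qp_init_attr passed to the create qp verb: both CQs
 * must be present, the capacities must fit the device, and at most one
 * SMI and one GSI qp may exist on the single rxe port
 */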
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

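/* allocate the responder resource array used to service inbound RDMA
 * read and atomic requests (and to replay duplicates); one entry per
 * outstanding request, managed as a ring via res_head/res_tail
 */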
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

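/* release each entry in the responder resource array and then free the
 * array itself
 */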
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

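/* release whatever a single resource entry holds: the cached response
 * skb and its qp reference for an atomic, or the MR reference for a
 * read, then mark the entry free
 */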
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

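/* like free_rd_atomic_resources() but keeps the array allocated; used
 * when resetting the qp rather than resizing or destroying it
 */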
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

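/* initialization common to all qp types: assign the qp number (SMI and
 * GSI map to the well-known numbers 0 and 1), set the default path MTU,
 * and initialize the locks, lists and counters
 */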
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

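/* set up the requester side: a kernel UDP socket for transmits, a send
 * queue whose WQE is sized to hold either max_send_sge SGEs or the
 * inline data limit (whichever is larger), the req/comp tasklets, and,
 * for RC, the retransmit and RNR NAK timers
 */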
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

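/* set up the responder side: the receive queue and its locks (skipped
 * when an SRQ supplies the receive buffers) and the resp tasklet
 */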
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);
	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;

		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}

		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, draining the work and
	 * packet queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}