ruc.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_hfi1_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
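
/*
 * For reference: the table is indexed by the 5-bit RNR timer code
 * (0x00-0x1f), so a lookup such as the one in ruc_loopback() below,
 *
 *	to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
 *	hfi1_add_rnr_timer(sqp, to);
 *
 * yields the RNR delay in microseconds (e.g. code 0x0c -> 640 us,
 * code 0x00 -> 655360 us, the largest value).
 */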

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 *
 * Return: 0 if the header checks out, 1 on error.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
		       int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;
	u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
				&hdr->u.l.grh.sgid,
				qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
				qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
			     &hdr->u.l.grh.sgid,
			     qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			     qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to forward a WQE addressed to the
 * same HFI.  Note that although we are single threaded due to the send
 * engine, we still have to protect against post_send().  We don't have
 * to worry about receive interrupts since this is a connected protocol
 * and all packets will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;
	int copy_last = 0;
	u32 to;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
	hfi1_add_rnr_timer(sqp, to);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	hfi1_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
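
/*
 * For reference: callers pass the running header length in 32-bit words
 * (@hwords) plus the payload word count (@nwords), and add the returned
 * GRH size (sizeof(struct ib_grh) / 4 == 10 words) to the header length,
 * as hfi1_make_ruc_header() does below (abridged):
 *
 *	qp->s_hdrwords += hfi1_make_grh(ibp, &ps->s_txreq->phdr.hdr.u.l.grh,
 *					&qp->remote_ah_attr.grh,
 *					qp->s_hdrwords, nwords);
 */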

#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * first middle packet's header to be copied.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
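
/*
 * For reference: on the first middle packet build_ahg() allocates an AHG
 * slot and asks the SDMA engine to copy the whole header
 * (SDMA_TXREQ_F_AHG_COPY); later middles reuse that copy
 * (SDMA_TXREQ_F_USE_AHG) and only patch the PSN in BTH2, adding a second
 * descriptor for the upper 16 PSN bits when they differ from the PSN
 * captured at copy time (s_ahgpsn).
 */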

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth1;

	/* Construct the header. */
	/* extra_bytes pads the payload out to a 4-byte boundary */
	extra_bytes = -ps->s_txreq->s_cur_size & 3;
	nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
	lrh0 = HFI1_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += hfi1_make_grh(ibp,
						&ps->s_txreq->phdr.hdr.u.l.grh,
						&qp->remote_ah_attr.grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	/*
	 * Reset the s_ahg/AHG fields.
	 *
	 * This ensures that the ahgentry/ahgcount are at a non-AHG
	 * default so that build_verbs_tx_desc() does not pick up a
	 * stale ahgidx.
	 *
	 * build_ahg() will modify as appropriate to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;
	ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	ps->s_txreq->phdr.hdr.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	bth1 = qp->remote_qpn;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	}
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
	unsigned long timeout;
	unsigned long timeout_int;
	int cpu;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((qp->remote_ah_attr.dlid &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((qp->remote_ah_attr.dlid &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	timeout = jiffies + (timeout_int) / 8;
	cpu = priv->s_sde ? priv->s_sde->cpu :
			    cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (unlikely(time_after(jiffies, timeout))) {
				if (workqueue_congested(cpu,
							ps.ppd->hfi1_wq)) {
					spin_lock_irqsave(&qp->s_lock,
							  ps.flags);
					qp->s_flags &= ~RVT_S_BUSY;
					hfi1_schedule_send(qp);
					spin_unlock_irqrestore(&qp->s_lock,
							       ps.flags);
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
					return;
				}
				if (!irqs_disabled()) {
					cond_resched();
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
				}
				timeout = jiffies + (timeout_int) / 8;
			}
			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));

	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp, wqe, status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}