rxe_resp.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

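/* The responder is a state machine: rxe_responder() below starts in
 * RESPST_GET_REQ (or RESPST_RESET) and dispatches one handler per state
 * until a handler parks the machine in RESPST_DONE or RESPST_EXIT.  The
 * RESPST_ERR_* states map protocol errors onto the NAK or completion
 * behavior required for the QP type.
 */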
static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

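/* Fetch the next request packet, if any.  In the error state the input
 * queue is drained one packet per pass; if a read reply is already in
 * progress (qp->resp.res is set) the state machine resumes there instead
 * of starting PSN checks for a new packet.
 */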
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

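/* psn_compare() orders 24-bit PSNs modulo wraparound: a positive result
 * means the packet PSN is ahead of the expected PSN, a negative result
 * means it is a duplicate from the past.
 */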
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			return RESPST_ERR_PSN_OUT_OF_SEQ;
		} else if (diff < 0) {
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

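/* Multi-packet messages must arrive as FIRST, zero or more MIDDLEs, then
 * LAST.  check_op_seq() verifies that the new opcode is a legal successor
 * of the previously completed opcode for this QP type.
 */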
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

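/* Verify that the QP's access flags permit the requested operation.  On
 * RC a disallowed opcode is NAKed as an unsupported opcode; on UC the
 * message is silently dropped.
 */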
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

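/* Take the WQE at the head of the shared receive queue.  The WQE is
 * copied out under the consumer lock, since the SRQ is shared with other
 * QPs; if consuming it brings the queue below the armed limit, an
 * IB_EVENT_SRQ_LIMIT_REACHED event is raised.
 */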
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

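/* Make sure a receive WQE (or, for read/atomic, a responder resource) is
 * available for this request.  In the error state this instead flushes
 * any remaining receive WQEs with IB_WC_WR_FLUSH_ERR.
 */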
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

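/* Length checking is effectively a no-op in this implementation: every
 * QP type falls straight through to the rkey check.
 */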
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

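/* Look up the memory region named by the rkey and confirm that the
 * requested access, address range, and packet length are all valid.  On
 * success the MR reference is parked in qp->resp.mr for the data copy
 * phase.
 */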
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err2;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}

			resid = mtu;
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
		}
	}

	WARN_ON(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err2:
	rxe_drop_ref(mem);
err1:
	return state;
}

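/* Copy inbound send payload into the receive WQE's scatter list via the
 * DMA state in the WQE.  -ENOSPC from copy_data() means the payload
 * exceeded the receive buffer.
 */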
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

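/* Copy an RDMA write payload directly into the target MR at the current
 * virtual address, then advance the write cursor.
 */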
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

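/* Note that this single global lock only serializes atomic operations
 * executed by this responder against each other; it does not make them
 * atomic with respect to CPU atomics on the same memory.
 */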
static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

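/* Build an acknowledge/response packet.  The BTH is cloned from the
 * inbound request so the routing headers match, then the opcode, PSN,
 * and (if present) AETH/AtomicAckETH fields are overwritten.  If crcp is
 * non-NULL the caller will finish the ICRC over the payload it adds.
 */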
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;
		res->last_psn		= req_pkt->psn +
					  (reth_len(req_pkt) + mtu - 1) /
					  mtu - 1;
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		kfree_skb(skb);
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		qp->resp.opcode = -1;
		qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			memset(&hdr, 0, sizeof(hdr));
			if (skb->protocol == htons(ETH_P_IP))
				memcpy(&hdr.roce4grh, ip_hdr(skb),
				       sizeof(hdr.roce4grh));
			else if (skb->protocol == htons(ETH_P_IPV6))
				memcpy(&hdr.ibgrh, ipv6_hdr(skb),
				       sizeof(hdr.ibgrh));

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON(1);
	}

	/* We successfully processed this new request. */
	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

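/* Post a completion for the consumed receive WQE.  The CQE layout
 * differs between kernel consumers (struct ib_wc) and user space
 * (struct ib_uverbs_wc), so the two cases are filled in separately.
 */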
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	wc->wr_id		= wqe->wr_id;
	wc->status		= qp->resp.status;
	wc->qp			= &qp->ibqp;

	/* fields after status are not required for errors */
	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data =
					(__u32 __force)immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
			}

			wc->qp = &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err_ratelimited("Failed sending ack\n");
		kfree_skb(skb);
	}

err1:
	return err;
}

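/* Atomic responses must be kept around for possible replay, so the skb
 * is cloned: the original is parked in a responder resource and the
 * clone is transmitted.
 */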
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy) {
		rxe_add_ref(qp); /* for the new SKB */
	} else {
		pr_warn("Could not clone atomic response\n");
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = qp->resp.psn;
	res->last_psn = qp->resp.psn;
	res->cur_psn = qp->resp.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

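/* Find the responder resource (read or atomic) covering a given PSN. */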
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

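/* Handle a packet whose PSN is in the past.  Duplicate sends and writes
 * are simply re-acked; duplicate reads are replayed (possibly partially)
 * from the saved responder resource; duplicate atomics get the cached
 * result retransmitted rather than re-executed.
 */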
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED,
				 qp->resp.psn - 1);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp); /* for the new SKB */
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
			bth_set_psn(SKB_TO_PKT(skb_copy),
				    qp->resp.psn - 1);
			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				kfree_skb(skb_copy);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found (class D error), or the cached result
		 * was resent above. Either way, drop the request.
		 */
		rc = RESPST_CLEANUP;
		goto out;
	}

out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

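/* Main loop of the responder state machine. Starts in RESPST_GET_REQ
 * (or RESPST_RESET if the QP was reset) and dispatches on the current
 * state until a handler ends the pass via RESPST_DONE or RESPST_EXIT.
 */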
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("state = %s\n", resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR,
				 qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					  qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET: {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&qp->req_pkts))) {
				rxe_drop_ref(qp);
				kfree_skb(skb);
			}

			while (!qp->srq && qp->rq.queue &&
			       queue_head(qp->rq.queue))
				advance_consumer(qp->rq.queue);

			qp->resp.wqe = NULL;
			goto exit;
		}

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	return ret;
}