qp.c
/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
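
/*
 * A sketch of how this table is consulted; it mirrors the checks done
 * in rvt_post_send() and rvt_post_recv() below:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */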

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than keeping one of their own. Mark the
	 * bitmaps for those up front. The reserved range must be *after* the
	 * range which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
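
/*
 * Before calling rvt_driver_qp_init() a driver must have populated the
 * callbacks checked above; a hedged sketch with hypothetical driver
 * function names:
 *
 *	rdi->driver_f.free_all_qps = mydrv_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc = mydrv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = mydrv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = mydrv_notify_qp_reset;
 */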

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
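
/*
 * mk_qpn() is the inverse of the decomposition used by free_qpn(): a
 * QPN lives in bitmap page qpn / RVT_BITS_PER_PAGE at bit
 * qpn & RVT_BITS_PER_PAGE_MASK. For example, assuming 4 KiB pages
 * (RVT_BITS_PER_PAGE == 32768), QPN 70000 sits in map[2] at bit 4464,
 * and mk_qpn(qpt, &qpt->map[2], 4464) reconstructs 70000.
 */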

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >=
			 * RVT_BITS_PER_PAGE. That is OK. It gets re-assigned
			 * below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If the send side should be cleared or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller.
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__releases(&qp->s_lock)
	__releases(&qp->s_hlock)
	__releases(&qp->r_lock)
	__acquires(&qp->r_lock)
	__acquires(&qp->s_hlock)
	__acquires(&qp->s_lock)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize = init_attr->cap.max_send_wr + 1;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
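		/* fall through - SMI/GSI share the common allocation below */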
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				sqsize * sz,
				gfp | __GFP_ZERO, PAGE_KERNEL);
		else
			swq = vzalloc_node(
				sqsize * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kzalloc_node(
					sizeof(*qp->s_ack_queue) *
					 rvt_max_atomic(rdi),
					gfp,
					rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp | __GFP_ZERO, PAGE_KERNEL);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;

	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	/* qp->ip may not have been created yet on this error path */
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
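
/*
 * rvt_create_qp() is only reached through the core verbs; a minimal
 * consumer sketch (hypothetical capacities and CQ) would be:
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = { .max_send_wr = 128, .max_send_sge = 4,
 *			 .max_recv_wr = 128, .max_recv_sge = 4 },
 *		.send_cq = cq, .recv_cq = cq,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 */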

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
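
/*
 * As with creation, modification arrives via the core; a consumer
 * sketch for an RC QP's RESET -> INIT transition (hypothetical pkey
 * index and port number):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = 0,
 *	};
 *	int err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */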

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
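
/*
 * A consumer sketch for posting a receive (hypothetical buffer, lkey
 * and wr_id), as driven through the ib_post_recv() core verb:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_recv_wr rwr = { .wr_id = cookie, .sg_list = &sge,
 *				  .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *	int err = ib_post_recv(qp, &rwr, &bad_wr);
 */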

/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr. Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;

	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}

	return len;
}

/**
 * qp_get_savail - return number of avail send entries
 * @qp: the qp
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * The return is adjusted to not count device specific
 * reserved operations.
 */
static inline u32 qp_get_savail(struct rvt_qp *qp)
{
	u32 slast;
	u32 ret;

	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		ret = qp->s_size - (qp->s_head - slast);
	else
		ret = slast - qp->s_head;
	return ret - 1;
}
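
/*
 * Ring arithmetic example for the above: with s_size = 8, s_head = 6
 * and s_last = 2, 8 - (6 - 2) - 1 = 3 entries remain; with s_head = 1
 * and s_last = 6, 6 - 1 - 1 = 4 remain. One slot is always left unused
 * so a full ring can be distinguished from an empty one.
 */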

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: set by driver checks if the caller should kick the send engine
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations including fast register and local invalidate
	 * can be processed immediately w/o being posted to the send queue
	 * if neither fencing nor completion generation is needed. However,
	 * once fencing or completion is requested, direct processing of
	 * following local operations must be disabled until all the local
	 * operations posted to the send queue have completed. This is
	 * necessary to ensure the correct ordering.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) &&
	    !(wr->send_flags & (IB_SEND_FENCE | IB_SEND_SIGNALED)) &&
	    !atomic_read(&qp->local_ops_pending)) {
		struct ib_reg_wr *reg = reg_wr(wr);

		switch (wr->opcode) {
		case IB_WR_REG_MR:
			return rvt_fast_reg_mr(qp, reg->mr, reg->key,
					       reg->access);
		case IB_WR_LOCAL_INV:
			return rvt_invalidate_rkey(qp, wr->ex.invalidate_rkey);
		default:
			return -EINVAL;
		}
	}

	/* check for avail */
	if (unlikely(!qp->s_avail)) {
		qp->s_avail = qp_get_savail(qp);
		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
			rvt_pr_err(rdi,
				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
				   qp->s_head, qp->s_tail, qp->s_cur,
				   qp->s_acked, qp->s_last);
		if (!qp->s_avail)
			return -ENOMEM;
	}
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok) {
				ret = -EINVAL;
				goto bail_inval_free;
			}
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		atomic_inc(&qp->local_ops_pending);
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
		qp->s_next_psn = wqe->lpsn + 1;
	}
	trace_rvt_post_one_wr(qp, wqe);
	smp_wmb(); /* see request builders */
	qp->s_avail--;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
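
/*
 * A consumer sketch for posting a single signaled send (hypothetical
 * buffer, lkey and wr_id), as driven through the ib_post_send() core
 * verb:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_send_wr swr = { .wr_id = cookie, .sg_list = &sge,
 *				  .num_sge = 1, .opcode = IB_WR_SEND,
 *				  .send_flags = IB_SEND_SIGNALED };
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &swr, &bad_wr);
 */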

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}