qplib_fp.c

  1. /*
  2. * Broadcom NetXtreme-E RoCE driver.
  3. *
  4. * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
  5. * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. *
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35. *
  36. * Description: Fast Path Operators
  37. */
  38. #include <linux/interrupt.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/sched.h>
  41. #include <linux/slab.h>
  42. #include <linux/pci.h>
  43. #include <linux/prefetch.h>
  44. #include "roce_hsi.h"
  45. #include "qplib_res.h"
  46. #include "qplib_rcfw.h"
  47. #include "qplib_sp.h"
  48. #include "qplib_fp.h"
  49. static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
  50. static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
  51. static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
  52. {
  53. qp->sq.condition = false;
  54. qp->sq.send_phantom = false;
  55. qp->sq.single = false;
  56. }
  57. /* Flush list */
  58. static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  59. {
  60. struct bnxt_qplib_cq *scq, *rcq;
  61. scq = qp->scq;
  62. rcq = qp->rcq;
  63. if (!qp->sq.flushed) {
  64. dev_dbg(&scq->hwq.pdev->dev,
  65. "QPLIB: FP: Adding to SQ Flush list = %p",
  66. qp);
  67. bnxt_qplib_cancel_phantom_processing(qp);
  68. list_add_tail(&qp->sq_flush, &scq->sqf_head);
  69. qp->sq.flushed = true;
  70. }
  71. if (!qp->srq) {
  72. if (!qp->rq.flushed) {
  73. dev_dbg(&rcq->hwq.pdev->dev,
  74. "QPLIB: FP: Adding to RQ Flush list = %p",
  75. qp);
  76. list_add_tail(&qp->rq_flush, &rcq->rqf_head);
  77. qp->rq.flushed = true;
  78. }
  79. }
  80. }
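/* CQ lock helpers: the send CQ lock is always taken first with IRQs
 * disabled, then the receive CQ lock. When the QP uses a single CQ for
 * both SQ and RQ only one lock is actually taken; __acquire()/__release()
 * keep the sparse annotations balanced in that case.
 */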
  81. void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
  82. unsigned long *flags)
  83. __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
  84. {
  85. spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
  86. if (qp->scq == qp->rcq)
  87. __acquire(&qp->rcq->hwq.lock);
  88. else
  89. spin_lock(&qp->rcq->hwq.lock);
  90. }
  91. void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
  92. unsigned long *flags)
  93. __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
  94. {
  95. if (qp->scq == qp->rcq)
  96. __release(&qp->rcq->hwq.lock);
  97. else
  98. spin_unlock(&qp->rcq->hwq.lock);
  99. spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
  100. }
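/* The "buddy" CQ is the other CQ of a QP that uses separate CQs for its
 * SQ and RQ; it is NULL when both queues share one CQ, in which case the
 * buddy lock/unlock helpers below become no-ops.
 */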
  101. static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
  102. struct bnxt_qplib_cq *cq)
  103. {
  104. struct bnxt_qplib_cq *buddy_cq = NULL;
  105. if (qp->scq == qp->rcq)
  106. buddy_cq = NULL;
  107. else if (qp->scq == cq)
  108. buddy_cq = qp->rcq;
  109. else
  110. buddy_cq = qp->scq;
  111. return buddy_cq;
  112. }
  113. static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
  114. struct bnxt_qplib_cq *cq)
  115. __acquires(&buddy_cq->hwq.lock)
  116. {
  117. struct bnxt_qplib_cq *buddy_cq = NULL;
  118. buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
  119. if (!buddy_cq)
  120. __acquire(&cq->hwq.lock);
  121. else
  122. spin_lock(&buddy_cq->hwq.lock);
  123. }
  124. static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
  125. struct bnxt_qplib_cq *cq)
  126. __releases(&buddy_cq->hwq.lock)
  127. {
  128. struct bnxt_qplib_cq *buddy_cq = NULL;
  129. buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
  130. if (!buddy_cq)
  131. __release(&cq->hwq.lock);
  132. else
  133. spin_unlock(&buddy_cq->hwq.lock);
  134. }
  135. void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  136. {
  137. unsigned long flags;
  138. bnxt_qplib_acquire_cq_locks(qp, &flags);
  139. __bnxt_qplib_add_flush_qp(qp);
  140. bnxt_qplib_release_cq_locks(qp, &flags);
  141. }
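/* Flush-list bookkeeping: a QP is linked onto its CQs' sqf_head/rqf_head
 * lists (tracked via sq.flushed/rq.flushed) while its outstanding work
 * needs to be flushed, and is unlinked again by bnxt_qplib_del_flush_qp()
 * after the CQs are cleaned and the queue indices are reset.
 */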
  142. static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
  143. {
  144. struct bnxt_qplib_cq *scq, *rcq;
  145. scq = qp->scq;
  146. rcq = qp->rcq;
  147. if (qp->sq.flushed) {
  148. qp->sq.flushed = false;
  149. list_del(&qp->sq_flush);
  150. }
  151. if (!qp->srq) {
  152. if (qp->rq.flushed) {
  153. qp->rq.flushed = false;
  154. list_del(&qp->rq_flush);
  155. }
  156. }
  157. }
  158. void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
  159. {
  160. unsigned long flags;
  161. bnxt_qplib_acquire_cq_locks(qp, &flags);
  162. __clean_cq(qp->scq, (u64)(unsigned long)qp);
  163. qp->sq.hwq.prod = 0;
  164. qp->sq.hwq.cons = 0;
  165. __clean_cq(qp->rcq, (u64)(unsigned long)qp);
  166. qp->rq.hwq.prod = 0;
  167. qp->rq.hwq.cons = 0;
  168. __bnxt_qplib_del_flush_qp(qp);
  169. bnxt_qplib_release_cq_locks(qp, &flags);
  170. }
  171. static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
  172. {
  173. struct bnxt_qplib_nq_work *nq_work =
  174. container_of(work, struct bnxt_qplib_nq_work, work);
  175. struct bnxt_qplib_cq *cq = nq_work->cq;
  176. struct bnxt_qplib_nq *nq = nq_work->nq;
  177. if (cq && nq) {
  178. spin_lock_bh(&cq->compl_lock);
  179. if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
  180. dev_dbg(&nq->pdev->dev,
  181. "%s:Trigger cq = %p event nq = %p\n",
  182. __func__, cq, nq);
  183. nq->cqn_handler(nq, cq);
  184. }
  185. spin_unlock_bh(&cq->compl_lock);
  186. }
  187. kfree(nq_work);
  188. }
  189. static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
  190. struct bnxt_qplib_qp *qp)
  191. {
  192. struct bnxt_qplib_q *rq = &qp->rq;
  193. struct bnxt_qplib_q *sq = &qp->sq;
  194. if (qp->rq_hdr_buf)
  195. dma_free_coherent(&res->pdev->dev,
  196. rq->hwq.max_elements * qp->rq_hdr_buf_size,
  197. qp->rq_hdr_buf, qp->rq_hdr_buf_map);
  198. if (qp->sq_hdr_buf)
  199. dma_free_coherent(&res->pdev->dev,
  200. sq->hwq.max_elements * qp->sq_hdr_buf_size,
  201. qp->sq_hdr_buf, qp->sq_hdr_buf_map);
  202. qp->rq_hdr_buf = NULL;
  203. qp->sq_hdr_buf = NULL;
  204. qp->rq_hdr_buf_map = 0;
  205. qp->sq_hdr_buf_map = 0;
  206. qp->sq_hdr_buf_size = 0;
  207. qp->rq_hdr_buf_size = 0;
  208. }
  209. static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
  210. struct bnxt_qplib_qp *qp)
  211. {
  212. struct bnxt_qplib_q *rq = &qp->rq;
  213. struct bnxt_qplib_q *sq = &qp->sq;
  214. int rc = 0;
  215. if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
  216. qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
  217. sq->hwq.max_elements *
  218. qp->sq_hdr_buf_size,
  219. &qp->sq_hdr_buf_map, GFP_KERNEL);
  220. if (!qp->sq_hdr_buf) {
  221. rc = -ENOMEM;
  222. dev_err(&res->pdev->dev,
  223. "QPLIB: Failed to create sq_hdr_buf");
  224. goto fail;
  225. }
  226. }
  227. if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
  228. qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
  229. rq->hwq.max_elements *
  230. qp->rq_hdr_buf_size,
  231. &qp->rq_hdr_buf_map,
  232. GFP_KERNEL);
  233. if (!qp->rq_hdr_buf) {
  234. rc = -ENOMEM;
  235. dev_err(&res->pdev->dev,
  236. "QPLIB: Failed to create rq_hdr_buf");
  237. goto fail;
  238. }
  239. }
  240. return 0;
  241. fail:
  242. bnxt_qplib_free_qp_hdr_buf(res, qp);
  243. return rc;
  244. }
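/* NQ service tasklet: consumes up to nq->budget notification entries per
 * run. Entry validity is derived from the consumer index wrap state via
 * NQE_CMP_VALID; CQ notifications are dispatched to the registered
 * cqn_handler, and the NQ doorbell is re-armed if any entries were
 * consumed.
 */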
  245. static void bnxt_qplib_service_nq(unsigned long data)
  246. {
  247. struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
  248. struct bnxt_qplib_hwq *hwq = &nq->hwq;
  249. struct nq_base *nqe, **nq_ptr;
  250. struct bnxt_qplib_cq *cq;
  251. int num_cqne_processed = 0;
  252. u32 sw_cons, raw_cons;
  253. u16 type;
  254. int budget = nq->budget;
  255. u64 q_handle;
  256. /* Service the NQ until empty */
  257. raw_cons = hwq->cons;
  258. while (budget--) {
  259. sw_cons = HWQ_CMP(raw_cons, hwq);
  260. nq_ptr = (struct nq_base **)hwq->pbl_ptr;
  261. nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
  262. if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
  263. break;
  264. type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
  265. switch (type) {
  266. case NQ_BASE_TYPE_CQ_NOTIFICATION:
  267. {
  268. struct nq_cn *nqcne = (struct nq_cn *)nqe;
  269. q_handle = le32_to_cpu(nqcne->cq_handle_low);
  270. q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
  271. << 32;
  272. cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
  273. bnxt_qplib_arm_cq_enable(cq);
  274. spin_lock_bh(&cq->compl_lock);
  275. atomic_set(&cq->arm_state, 0);
  276. if (!nq->cqn_handler(nq, (cq)))
  277. num_cqne_processed++;
  278. else
  279. dev_warn(&nq->pdev->dev,
  280. "QPLIB: cqn - type 0x%x not handled",
  281. type);
  282. spin_unlock_bh(&cq->compl_lock);
  283. break;
  284. }
  285. case NQ_BASE_TYPE_DBQ_EVENT:
  286. break;
  287. default:
  288. dev_warn(&nq->pdev->dev,
  289. "QPLIB: nqe with type = 0x%x not handled",
  290. type);
  291. break;
  292. }
  293. raw_cons++;
  294. }
  295. if (hwq->cons != raw_cons) {
  296. hwq->cons = raw_cons;
  297. NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
  298. }
  299. }
  300. static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
  301. {
  302. struct bnxt_qplib_nq *nq = dev_instance;
  303. struct bnxt_qplib_hwq *hwq = &nq->hwq;
  304. struct nq_base **nq_ptr;
  305. u32 sw_cons;
  306. /* Prefetch the NQ element */
  307. sw_cons = HWQ_CMP(hwq->cons, hwq);
  308. nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
  309. prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
  310. /* Fan out to CPU affinitized kthreads? */
  311. tasklet_schedule(&nq->worker);
  312. return IRQ_HANDLED;
  313. }
  314. void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
  315. {
  316. if (nq->cqn_wq) {
  317. destroy_workqueue(nq->cqn_wq);
  318. nq->cqn_wq = NULL;
  319. }
  320. /* Make sure the HW is stopped! */
  321. synchronize_irq(nq->vector);
  322. tasklet_disable(&nq->worker);
  323. tasklet_kill(&nq->worker);
  324. if (nq->requested) {
  325. free_irq(nq->vector, nq);
  326. nq->requested = false;
  327. }
  328. if (nq->bar_reg_iomem)
  329. iounmap(nq->bar_reg_iomem);
  330. nq->bar_reg_iomem = NULL;
  331. nq->cqn_handler = NULL;
  332. nq->srqn_handler = NULL;
  333. nq->vector = 0;
  334. }
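/* bnxt_qplib_enable_nq() wires up the MSI-X vector, the service tasklet,
 * the CQ notification workqueue and the NQ consumer doorbell; any failure
 * unwinds through bnxt_qplib_disable_nq(), which is also the normal
 * teardown path.
 */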
  335. int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
  336. int msix_vector, int bar_reg_offset,
  337. int (*cqn_handler)(struct bnxt_qplib_nq *nq,
  338. struct bnxt_qplib_cq *),
  339. int (*srqn_handler)(struct bnxt_qplib_nq *nq,
  340. void *, u8 event))
  341. {
  342. resource_size_t nq_base;
  343. int rc = -1;
  344. nq->pdev = pdev;
  345. nq->vector = msix_vector;
  346. nq->cqn_handler = cqn_handler;
  347. nq->srqn_handler = srqn_handler;
  348. tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
  349. /* Have a task to schedule CQ notifiers in post send case */
  350. nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
  351. if (!nq->cqn_wq)
  352. goto fail;
  353. nq->requested = false;
  354. rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
  355. if (rc) {
  356. dev_err(&nq->pdev->dev,
  357. "Failed to request IRQ for NQ: %#x", rc);
  358. bnxt_qplib_disable_nq(nq);
  359. goto fail;
  360. }
  361. nq->requested = true;
  362. nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
  363. nq->bar_reg_off = bar_reg_offset;
  364. nq_base = pci_resource_start(pdev, nq->bar_reg);
  365. if (!nq_base) {
  366. rc = -ENOMEM;
  367. goto fail;
  368. }
  369. nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
  370. if (!nq->bar_reg_iomem) {
  371. rc = -ENOMEM;
  372. goto fail;
  373. }
  374. NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
  375. return 0;
  376. fail:
  377. bnxt_qplib_disable_nq(nq);
  378. return rc;
  379. }
  380. void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
  381. {
  382. if (nq->hwq.max_elements)
  383. bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
  384. }
  385. int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
  386. {
  387. nq->pdev = pdev;
  388. if (!nq->hwq.max_elements ||
  389. nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
  390. nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
  391. if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
  392. &nq->hwq.max_elements,
  393. BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
  394. PAGE_SIZE, HWQ_TYPE_L2_CMPL))
  395. return -ENOMEM;
  396. nq->budget = 8;
  397. return 0;
  398. }
  399. /* QP */
  400. int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  401. {
  402. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  403. struct cmdq_create_qp1 req;
  404. struct creq_create_qp1_resp resp;
  405. struct bnxt_qplib_pbl *pbl;
  406. struct bnxt_qplib_q *sq = &qp->sq;
  407. struct bnxt_qplib_q *rq = &qp->rq;
  408. int rc;
  409. u16 cmd_flags = 0;
  410. u32 qp_flags = 0;
  411. RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
  412. /* General */
  413. req.type = qp->type;
  414. req.dpi = cpu_to_le32(qp->dpi->dpi);
  415. req.qp_handle = cpu_to_le64(qp->qp_handle);
  416. /* SQ */
  417. sq->hwq.max_elements = sq->max_wqe;
  418. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
  419. &sq->hwq.max_elements,
  420. BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
  421. PAGE_SIZE, HWQ_TYPE_QUEUE);
  422. if (rc)
  423. goto exit;
  424. sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
  425. if (!sq->swq) {
  426. rc = -ENOMEM;
  427. goto fail_sq;
  428. }
  429. pbl = &sq->hwq.pbl[PBL_LVL_0];
  430. req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  431. req.sq_pg_size_sq_lvl =
  432. ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
  433. << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
  434. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  435. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
  436. pbl->pg_size == ROCE_PG_SIZE_8K ?
  437. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
  438. pbl->pg_size == ROCE_PG_SIZE_64K ?
  439. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
  440. pbl->pg_size == ROCE_PG_SIZE_2M ?
  441. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
  442. pbl->pg_size == ROCE_PG_SIZE_8M ?
  443. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
  444. pbl->pg_size == ROCE_PG_SIZE_1G ?
  445. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
  446. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
  447. if (qp->scq)
  448. req.scq_cid = cpu_to_le32(qp->scq->id);
  449. qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
  450. /* RQ */
  451. if (rq->max_wqe) {
  452. rq->hwq.max_elements = qp->rq.max_wqe;
  453. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
  454. &rq->hwq.max_elements,
  455. BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
  456. PAGE_SIZE, HWQ_TYPE_QUEUE);
  457. if (rc)
  458. goto fail_sq;
  459. rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
  460. GFP_KERNEL);
  461. if (!rq->swq) {
  462. rc = -ENOMEM;
  463. goto fail_rq;
  464. }
  465. pbl = &rq->hwq.pbl[PBL_LVL_0];
  466. req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  467. req.rq_pg_size_rq_lvl =
  468. ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
  469. CMDQ_CREATE_QP1_RQ_LVL_SFT) |
  470. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  471. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
  472. pbl->pg_size == ROCE_PG_SIZE_8K ?
  473. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
  474. pbl->pg_size == ROCE_PG_SIZE_64K ?
  475. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
  476. pbl->pg_size == ROCE_PG_SIZE_2M ?
  477. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
  478. pbl->pg_size == ROCE_PG_SIZE_8M ?
  479. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
  480. pbl->pg_size == ROCE_PG_SIZE_1G ?
  481. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
  482. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
  483. if (qp->rcq)
  484. req.rcq_cid = cpu_to_le32(qp->rcq->id);
  485. }
  486. /* Header buffer - allow hdr_buf to be passed in */
  487. rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
  488. if (rc) {
  489. rc = -ENOMEM;
  490. goto fail;
  491. }
  492. req.qp_flags = cpu_to_le32(qp_flags);
  493. req.sq_size = cpu_to_le32(sq->hwq.max_elements);
  494. req.rq_size = cpu_to_le32(rq->hwq.max_elements);
  495. req.sq_fwo_sq_sge =
  496. cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
  497. CMDQ_CREATE_QP1_SQ_SGE_SFT);
  498. req.rq_fwo_rq_sge =
  499. cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
  500. CMDQ_CREATE_QP1_RQ_SGE_SFT);
  501. req.pd_id = cpu_to_le32(qp->pd->id);
  502. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  503. (void *)&resp, NULL, 0);
  504. if (rc)
  505. goto fail;
  506. qp->id = le32_to_cpu(resp.xid);
  507. qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
  508. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  509. rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
  510. return 0;
  511. fail:
  512. bnxt_qplib_free_qp_hdr_buf(res, qp);
  513. fail_rq:
  514. bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
  515. kfree(rq->swq);
  516. fail_sq:
  517. bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
  518. kfree(sq->swq);
  519. exit:
  520. return rc;
  521. }
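/* For RC QPs, bnxt_qplib_create_qp() reserves one sq_psn_search entry per
 * SQ WQE in the pages that follow the SQ WQE area; the poff computation
 * keeps the per-WQE psn_search pointers correct when that area does not
 * start on a page boundary. The ORRQ and IRRQ context queues are sized
 * from the requested max_rd_atomic/max_dest_rd_atomic limits.
 */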
  522. int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  523. {
  524. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  525. struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
  526. struct cmdq_create_qp req;
  527. struct creq_create_qp_resp resp;
  528. struct bnxt_qplib_pbl *pbl;
  529. struct sq_psn_search **psn_search_ptr;
  530. unsigned long int psn_search, poff = 0;
  531. struct bnxt_qplib_q *sq = &qp->sq;
  532. struct bnxt_qplib_q *rq = &qp->rq;
  533. struct bnxt_qplib_hwq *xrrq;
  534. int i, rc, req_size, psn_sz;
  535. u16 cmd_flags = 0, max_ssge;
  536. u32 sw_prod, qp_flags = 0;
  537. RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
  538. /* General */
  539. req.type = qp->type;
  540. req.dpi = cpu_to_le32(qp->dpi->dpi);
  541. req.qp_handle = cpu_to_le64(qp->qp_handle);
  542. /* SQ */
  543. psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
  544. sizeof(struct sq_psn_search) : 0;
  545. sq->hwq.max_elements = sq->max_wqe;
  546. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
  547. sq->nmap, &sq->hwq.max_elements,
  548. BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
  549. psn_sz,
  550. PAGE_SIZE, HWQ_TYPE_QUEUE);
  551. if (rc)
  552. goto exit;
  553. sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
  554. if (!sq->swq) {
  555. rc = -ENOMEM;
  556. goto fail_sq;
  557. }
  558. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  559. if (psn_sz) {
  560. psn_search_ptr = (struct sq_psn_search **)
  561. &hw_sq_send_ptr[get_sqe_pg
  562. (sq->hwq.max_elements)];
  563. psn_search = (unsigned long int)
  564. &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
  565. [get_sqe_idx(sq->hwq.max_elements)];
  566. if (psn_search & ~PAGE_MASK) {
  567. /* If the psn_search does not start on a page boundary,
  568. * then calculate the offset
  569. */
  570. poff = (psn_search & ~PAGE_MASK) /
  571. BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
  572. }
  573. for (i = 0; i < sq->hwq.max_elements; i++)
  574. sq->swq[i].psn_search =
  575. &psn_search_ptr[get_psne_pg(i + poff)]
  576. [get_psne_idx(i + poff)];
  577. }
  578. pbl = &sq->hwq.pbl[PBL_LVL_0];
  579. req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  580. req.sq_pg_size_sq_lvl =
  581. ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
  582. << CMDQ_CREATE_QP_SQ_LVL_SFT) |
  583. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  584. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
  585. pbl->pg_size == ROCE_PG_SIZE_8K ?
  586. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
  587. pbl->pg_size == ROCE_PG_SIZE_64K ?
  588. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
  589. pbl->pg_size == ROCE_PG_SIZE_2M ?
  590. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
  591. pbl->pg_size == ROCE_PG_SIZE_8M ?
  592. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
  593. pbl->pg_size == ROCE_PG_SIZE_1G ?
  594. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
  595. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
  596. /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
  597. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  598. for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
  599. hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
  600. [get_sqe_idx(sw_prod)];
  601. hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
  602. }
  603. if (qp->scq)
  604. req.scq_cid = cpu_to_le32(qp->scq->id);
  605. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
  606. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
  607. if (qp->sig_type)
  608. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
  609. /* RQ */
  610. if (rq->max_wqe) {
  611. rq->hwq.max_elements = rq->max_wqe;
  612. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
  613. rq->nmap, &rq->hwq.max_elements,
  614. BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
  615. PAGE_SIZE, HWQ_TYPE_QUEUE);
  616. if (rc)
  617. goto fail_sq;
  618. rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
  619. GFP_KERNEL);
  620. if (!rq->swq) {
  621. rc = -ENOMEM;
  622. goto fail_rq;
  623. }
  624. pbl = &rq->hwq.pbl[PBL_LVL_0];
  625. req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  626. req.rq_pg_size_rq_lvl =
  627. ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
  628. CMDQ_CREATE_QP_RQ_LVL_SFT) |
  629. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  630. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
  631. pbl->pg_size == ROCE_PG_SIZE_8K ?
  632. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
  633. pbl->pg_size == ROCE_PG_SIZE_64K ?
  634. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
  635. pbl->pg_size == ROCE_PG_SIZE_2M ?
  636. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
  637. pbl->pg_size == ROCE_PG_SIZE_8M ?
  638. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
  639. pbl->pg_size == ROCE_PG_SIZE_1G ?
  640. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
  641. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
  642. }
  643. if (qp->rcq)
  644. req.rcq_cid = cpu_to_le32(qp->rcq->id);
  645. req.qp_flags = cpu_to_le32(qp_flags);
  646. req.sq_size = cpu_to_le32(sq->hwq.max_elements);
  647. req.rq_size = cpu_to_le32(rq->hwq.max_elements);
  648. qp->sq_hdr_buf = NULL;
  649. qp->rq_hdr_buf = NULL;
  650. rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
  651. if (rc)
  652. goto fail_rq;
  653. /* CTRL-22434: Irrespective of the requested SGE count on the SQ
  654. * always create the QP with max send sges possible if the requested
  655. * inline size is greater than 0.
  656. */
  657. max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
  658. req.sq_fwo_sq_sge = cpu_to_le16(
  659. ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
  660. << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
  661. req.rq_fwo_rq_sge = cpu_to_le16(
  662. ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
  663. << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
  664. /* ORRQ and IRRQ */
  665. if (psn_sz) {
  666. xrrq = &qp->orrq;
  667. xrrq->max_elements =
  668. ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
  669. req_size = xrrq->max_elements *
  670. BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
  671. req_size &= ~(PAGE_SIZE - 1);
  672. rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
  673. &xrrq->max_elements,
  674. BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
  675. 0, req_size, HWQ_TYPE_CTX);
  676. if (rc)
  677. goto fail_buf_free;
  678. pbl = &xrrq->pbl[PBL_LVL_0];
  679. req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
  680. xrrq = &qp->irrq;
  681. xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
  682. qp->max_dest_rd_atomic);
  683. req_size = xrrq->max_elements *
  684. BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
  685. req_size &= ~(PAGE_SIZE - 1);
  686. rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
  687. &xrrq->max_elements,
  688. BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
  689. 0, req_size, HWQ_TYPE_CTX);
  690. if (rc)
  691. goto fail_orrq;
  692. pbl = &xrrq->pbl[PBL_LVL_0];
  693. req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
  694. }
  695. req.pd_id = cpu_to_le32(qp->pd->id);
  696. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  697. (void *)&resp, NULL, 0);
  698. if (rc)
  699. goto fail;
  700. qp->id = le32_to_cpu(resp.xid);
  701. qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
  702. INIT_LIST_HEAD(&qp->sq_flush);
  703. INIT_LIST_HEAD(&qp->rq_flush);
  704. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  705. rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
  706. return 0;
  707. fail:
  708. if (qp->irrq.max_elements)
  709. bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
  710. fail_orrq:
  711. if (qp->orrq.max_elements)
  712. bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
  713. fail_buf_free:
  714. bnxt_qplib_free_qp_hdr_buf(res, qp);
  715. fail_rq:
  716. bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
  717. kfree(rq->swq);
  718. fail_sq:
  719. bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
  720. kfree(sq->swq);
  721. exit:
  722. return rc;
  723. }
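/* QP state-transition helpers: __filter_modify_flags() prunes the modify
 * mask according to the current QP state so that only fields the firmware
 * accepts for that transition are sent, and fills in mandatory defaults
 * such as the 2048-byte path MTU and minimum rd_atomic values.
 */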
  724. static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
  725. {
  726. switch (qp->state) {
  727. case CMDQ_MODIFY_QP_NEW_STATE_RTR:
  728. /* INIT->RTR, configure the path_mtu to the default
  729. * 2048 if not being requested
  730. */
  731. if (!(qp->modify_flags &
  732. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
  733. qp->modify_flags |=
  734. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  735. qp->path_mtu =
  736. CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
  737. }
  738. qp->modify_flags &=
  739. ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
  740. /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
  741. if (qp->max_dest_rd_atomic < 1)
  742. qp->max_dest_rd_atomic = 1;
  743. qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
  744. /* Bono FW 20.6.5 requires SGID_INDEX configuration */
  745. if (!(qp->modify_flags &
  746. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
  747. qp->modify_flags |=
  748. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
  749. qp->ah.sgid_index = 0;
  750. }
  751. break;
  752. default:
  753. break;
  754. }
  755. }
  756. static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
  757. {
  758. switch (qp->state) {
  759. case CMDQ_MODIFY_QP_NEW_STATE_RTS:
  760. /* Bono FW requires the max_rd_atomic to be >= 1 */
  761. if (qp->max_rd_atomic < 1)
  762. qp->max_rd_atomic = 1;
  763. /* Bono FW does not allow PKEY_INDEX,
  764. * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
  765. * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
  766. * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
  767. * modification
  768. */
  769. qp->modify_flags &=
  770. ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
  771. CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
  772. CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
  773. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
  774. CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
  775. CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
  776. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
  777. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
  778. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
  779. CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
  780. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
  781. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
  782. break;
  783. default:
  784. break;
  785. }
  786. }
  787. static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
  788. {
  789. switch (qp->cur_qp_state) {
  790. case CMDQ_MODIFY_QP_NEW_STATE_RESET:
  791. break;
  792. case CMDQ_MODIFY_QP_NEW_STATE_INIT:
  793. __modify_flags_from_init_state(qp);
  794. break;
  795. case CMDQ_MODIFY_QP_NEW_STATE_RTR:
  796. __modify_flags_from_rtr_state(qp);
  797. break;
  798. case CMDQ_MODIFY_QP_NEW_STATE_RTS:
  799. break;
  800. case CMDQ_MODIFY_QP_NEW_STATE_SQD:
  801. break;
  802. case CMDQ_MODIFY_QP_NEW_STATE_SQE:
  803. break;
  804. case CMDQ_MODIFY_QP_NEW_STATE_ERR:
  805. break;
  806. default:
  807. break;
  808. }
  809. }
  810. int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  811. {
  812. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  813. struct cmdq_modify_qp req;
  814. struct creq_modify_qp_resp resp;
  815. u16 cmd_flags = 0, pkey;
  816. u32 temp32[4];
  817. u32 bmask;
  818. int rc;
  819. RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
  820. /* Filter out the qp_attr_mask based on the state->new transition */
  821. __filter_modify_flags(qp);
  822. bmask = qp->modify_flags;
  823. req.modify_mask = cpu_to_le32(qp->modify_flags);
  824. req.qp_cid = cpu_to_le32(qp->id);
  825. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
  826. req.network_type_en_sqd_async_notify_new_state =
  827. (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
  828. (qp->en_sqd_async_notify ?
  829. CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
  830. }
  831. req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
  832. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
  833. req.access = qp->access;
  834. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
  835. if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
  836. qp->pkey_index, &pkey))
  837. req.pkey = cpu_to_le16(pkey);
  838. }
  839. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
  840. req.qkey = cpu_to_le32(qp->qkey);
  841. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
  842. memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
  843. req.dgid[0] = cpu_to_le32(temp32[0]);
  844. req.dgid[1] = cpu_to_le32(temp32[1]);
  845. req.dgid[2] = cpu_to_le32(temp32[2]);
  846. req.dgid[3] = cpu_to_le32(temp32[3]);
  847. }
  848. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
  849. req.flow_label = cpu_to_le32(qp->ah.flow_label);
  850. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
  851. req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
  852. [qp->ah.sgid_index]);
  853. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
  854. req.hop_limit = qp->ah.hop_limit;
  855. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
  856. req.traffic_class = qp->ah.traffic_class;
  857. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
  858. memcpy(req.dest_mac, qp->ah.dmac, 6);
  859. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
  860. req.path_mtu = qp->path_mtu;
  861. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
  862. req.timeout = qp->timeout;
  863. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
  864. req.retry_cnt = qp->retry_cnt;
  865. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
  866. req.rnr_retry = qp->rnr_retry;
  867. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
  868. req.min_rnr_timer = qp->min_rnr_timer;
  869. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
  870. req.rq_psn = cpu_to_le32(qp->rq.psn);
  871. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
  872. req.sq_psn = cpu_to_le32(qp->sq.psn);
  873. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
  874. req.max_rd_atomic =
  875. ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
  876. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
  877. req.max_dest_rd_atomic =
  878. IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
  879. req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
  880. req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
  881. req.sq_sge = cpu_to_le16(qp->sq.max_sge);
  882. req.rq_sge = cpu_to_le16(qp->rq.max_sge);
  883. req.max_inline_data = cpu_to_le32(qp->max_inline_data);
  884. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
  885. req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
  886. req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
  887. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  888. (void *)&resp, NULL, 0);
  889. if (rc)
  890. return rc;
  891. qp->cur_qp_state = qp->state;
  892. return 0;
  893. }
  894. int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  895. {
  896. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  897. struct cmdq_query_qp req;
  898. struct creq_query_qp_resp resp;
  899. struct bnxt_qplib_rcfw_sbuf *sbuf;
  900. struct creq_query_qp_resp_sb *sb;
  901. u16 cmd_flags = 0;
  902. u32 temp32[4];
  903. int i, rc = 0;
  904. RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
  905. sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
  906. if (!sbuf)
  907. return -ENOMEM;
  908. sb = sbuf->sb;
  909. req.qp_cid = cpu_to_le32(qp->id);
  910. req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
  911. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
  912. (void *)sbuf, 0);
  913. if (rc)
  914. goto bail;
  915. /* Extract the context from the side buffer */
  916. qp->state = sb->en_sqd_async_notify_state &
  917. CREQ_QUERY_QP_RESP_SB_STATE_MASK;
  918. qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
  919. CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
  920. true : false;
  921. qp->access = sb->access;
  922. qp->pkey_index = le16_to_cpu(sb->pkey);
  923. qp->qkey = le32_to_cpu(sb->qkey);
  924. temp32[0] = le32_to_cpu(sb->dgid[0]);
  925. temp32[1] = le32_to_cpu(sb->dgid[1]);
  926. temp32[2] = le32_to_cpu(sb->dgid[2]);
  927. temp32[3] = le32_to_cpu(sb->dgid[3]);
  928. memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
  929. qp->ah.flow_label = le32_to_cpu(sb->flow_label);
  930. qp->ah.sgid_index = 0;
  931. for (i = 0; i < res->sgid_tbl.max; i++) {
  932. if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
  933. qp->ah.sgid_index = i;
  934. break;
  935. }
  936. }
  937. if (i == res->sgid_tbl.max)
  938. dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
  939. qp->ah.hop_limit = sb->hop_limit;
  940. qp->ah.traffic_class = sb->traffic_class;
  941. memcpy(qp->ah.dmac, sb->dest_mac, 6);
  942. qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
  943. CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
  944. CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
  945. qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
  946. CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
  947. CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
  948. qp->timeout = sb->timeout;
  949. qp->retry_cnt = sb->retry_cnt;
  950. qp->rnr_retry = sb->rnr_retry;
  951. qp->min_rnr_timer = sb->min_rnr_timer;
  952. qp->rq.psn = le32_to_cpu(sb->rq_psn);
  953. qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
  954. qp->sq.psn = le32_to_cpu(sb->sq_psn);
  955. qp->max_dest_rd_atomic =
  956. IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
  957. qp->sq.max_wqe = qp->sq.hwq.max_elements;
  958. qp->rq.max_wqe = qp->rq.hwq.max_elements;
  959. qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
  960. qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
  961. qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
  962. qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
  963. memcpy(qp->smac, sb->src_mac, 6);
  964. qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
  965. bail:
  966. bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
  967. return rc;
  968. }
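/* __clean_cq() walks every valid CQE in the CQ and clears the qp_handle
 * of entries that belong to the QP being destroyed, so later polling
 * cannot hand back stale completions for that QP.
 */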
  969. static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
  970. {
  971. struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
  972. struct cq_base *hw_cqe, **hw_cqe_ptr;
  973. int i;
  974. for (i = 0; i < cq_hwq->max_elements; i++) {
  975. hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
  976. hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
  977. if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
  978. continue;
  979. switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
  980. case CQ_BASE_CQE_TYPE_REQ:
  981. case CQ_BASE_CQE_TYPE_TERMINAL:
  982. {
  983. struct cq_req *cqe = (struct cq_req *)hw_cqe;
  984. if (qp == le64_to_cpu(cqe->qp_handle))
  985. cqe->qp_handle = 0;
  986. break;
  987. }
  988. case CQ_BASE_CQE_TYPE_RES_RC:
  989. case CQ_BASE_CQE_TYPE_RES_UD:
  990. case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
  991. {
  992. struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
  993. if (qp == le64_to_cpu(cqe->qp_handle))
  994. cqe->qp_handle = 0;
  995. break;
  996. }
  997. default:
  998. break;
  999. }
  1000. }
  1001. }
  1002. int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
  1003. struct bnxt_qplib_qp *qp)
  1004. {
  1005. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  1006. struct cmdq_destroy_qp req;
  1007. struct creq_destroy_qp_resp resp;
  1008. unsigned long flags;
  1009. u16 cmd_flags = 0;
  1010. int rc;
  1011. rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
  1012. rcfw->qp_tbl[qp->id].qp_handle = NULL;
  1013. RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
  1014. req.qp_cid = cpu_to_le32(qp->id);
  1015. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  1016. (void *)&resp, NULL, 0);
  1017. if (rc) {
  1018. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  1019. rcfw->qp_tbl[qp->id].qp_handle = qp;
  1020. return rc;
  1021. }
  1022. /* Must walk the associated CQs to nullify the QP ptr */
  1023. spin_lock_irqsave(&qp->scq->hwq.lock, flags);
  1024. __clean_cq(qp->scq, (u64)(unsigned long)qp);
  1025. if (qp->rcq && qp->rcq != qp->scq) {
  1026. spin_lock(&qp->rcq->hwq.lock);
  1027. __clean_cq(qp->rcq, (u64)(unsigned long)qp);
  1028. spin_unlock(&qp->rcq->hwq.lock);
  1029. }
  1030. spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
  1031. bnxt_qplib_free_qp_hdr_buf(res, qp);
  1032. bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
  1033. kfree(qp->sq.swq);
  1034. bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
  1035. kfree(qp->rq.swq);
  1036. if (qp->irrq.max_elements)
  1037. bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
  1038. if (qp->orrq.max_elements)
  1039. bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
  1040. return 0;
  1041. }
  1042. void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
  1043. struct bnxt_qplib_sge *sge)
  1044. {
  1045. struct bnxt_qplib_q *sq = &qp->sq;
  1046. u32 sw_prod;
  1047. memset(sge, 0, sizeof(*sge));
  1048. if (qp->sq_hdr_buf) {
  1049. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1050. sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
  1051. sw_prod * qp->sq_hdr_buf_size);
  1052. sge->lkey = 0xFFFFFFFF;
  1053. sge->size = qp->sq_hdr_buf_size;
  1054. return qp->sq_hdr_buf + sw_prod * sge->size;
  1055. }
  1056. return NULL;
  1057. }
  1058. u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
  1059. {
  1060. struct bnxt_qplib_q *rq = &qp->rq;
  1061. return HWQ_CMP(rq->hwq.prod, &rq->hwq);
  1062. }
  1063. dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
  1064. {
  1065. return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
  1066. }
  1067. void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
  1068. struct bnxt_qplib_sge *sge)
  1069. {
  1070. struct bnxt_qplib_q *rq = &qp->rq;
  1071. u32 sw_prod;
  1072. memset(sge, 0, sizeof(*sge));
  1073. if (qp->rq_hdr_buf) {
  1074. sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
  1075. sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
  1076. sw_prod * qp->rq_hdr_buf_size);
  1077. sge->lkey = 0xFFFFFFFF;
  1078. sge->size = qp->rq_hdr_buf_size;
  1079. return qp->rq_hdr_buf + sw_prod * sge->size;
  1080. }
  1081. return NULL;
  1082. }
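/* Doorbell layout used below: the index field carries the current
 * producer offset, type_xid carries the QP id plus the queue type (SQ or
 * RQ). The wmb() ensures all WQE stores are visible to the device before
 * the 64-bit doorbell write is issued.
 */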
  1083. void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
  1084. {
  1085. struct bnxt_qplib_q *sq = &qp->sq;
  1086. struct dbr_dbr db_msg = { 0 };
  1087. u32 sw_prod;
  1088. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1089. db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
  1090. DBR_DBR_INDEX_MASK);
  1091. db_msg.type_xid =
  1092. cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
  1093. DBR_DBR_TYPE_SQ);
  1094. /* Flush all the WQE writes to HW */
  1095. wmb();
  1096. __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
  1097. }
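/* When the QP is already in the error state, bnxt_qplib_post_send() and
 * bnxt_qplib_post_recv() skip building a hardware WQE: the work request
 * is only recorded in the software queue and an nq_work item is queued so
 * that the consumer is notified and the request can be completed through
 * the poll path instead.
 */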
  1098. int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
  1099. struct bnxt_qplib_swqe *wqe)
  1100. {
  1101. struct bnxt_qplib_q *sq = &qp->sq;
  1102. struct bnxt_qplib_swq *swq;
  1103. struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
  1104. struct sq_sge *hw_sge;
  1105. struct bnxt_qplib_nq_work *nq_work = NULL;
  1106. bool sch_handler = false;
  1107. u32 sw_prod;
  1108. u8 wqe_size16;
  1109. int i, rc = 0, data_len = 0, pkt_num = 0;
  1110. __le32 temp32;
  1111. if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
  1112. if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
  1113. sch_handler = true;
  1114. dev_dbg(&sq->hwq.pdev->dev,
  1115. "%s Error QP. Scheduling for poll_cq\n",
  1116. __func__);
  1117. goto queue_err;
  1118. }
  1119. }
  1120. if (bnxt_qplib_queue_full(sq)) {
  1121. dev_err(&sq->hwq.pdev->dev,
  1122. "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
  1123. sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
  1124. sq->q_full_delta);
  1125. rc = -ENOMEM;
  1126. goto done;
  1127. }
  1128. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1129. swq = &sq->swq[sw_prod];
  1130. swq->wr_id = wqe->wr_id;
  1131. swq->type = wqe->type;
  1132. swq->flags = wqe->flags;
  1133. if (qp->sig_type)
  1134. swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
  1135. swq->start_psn = sq->psn & BTH_PSN_MASK;
  1136. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  1137. hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
  1138. [get_sqe_idx(sw_prod)];
  1139. memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
  1140. if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
  1141. /* Copy the inline data */
  1142. if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
  1143. dev_warn(&sq->hwq.pdev->dev,
  1144. "QPLIB: Inline data length > 96 detected");
  1145. data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
  1146. } else {
  1147. data_len = wqe->inline_len;
  1148. }
  1149. memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
  1150. wqe_size16 = (data_len + 15) >> 4;
  1151. } else {
  1152. for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
  1153. i < wqe->num_sge; i++, hw_sge++) {
  1154. hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
  1155. hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
  1156. hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
  1157. data_len += wqe->sg_list[i].size;
  1158. }
  1159. /* Each SGE entry = 1 WQE size16 */
  1160. wqe_size16 = wqe->num_sge;
  1161. /* HW requires the wqe size to have room for at least one SGE even if
  1162. * none was supplied by ULP
  1163. */
  1164. if (!wqe->num_sge)
  1165. wqe_size16++;
  1166. }
  1167. /* Specifics */
  1168. switch (wqe->type) {
  1169. case BNXT_QPLIB_SWQE_TYPE_SEND:
  1170. if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
  1171. /* Assemble info for Raw Ethertype QPs */
  1172. struct sq_send_raweth_qp1 *sqe =
  1173. (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
  1174. sqe->wqe_type = wqe->type;
  1175. sqe->flags = wqe->flags;
  1176. sqe->wqe_size = wqe_size16 +
  1177. ((offsetof(typeof(*sqe), data) + 15) >> 4);
  1178. sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
  1179. sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
  1180. sqe->length = cpu_to_le32(data_len);
  1181. sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
  1182. SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
  1183. SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
  1184. break;
  1185. }
  1186. /* else, just fall thru */
  1187. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
  1188. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
  1189. {
  1190. struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
  1191. sqe->wqe_type = wqe->type;
  1192. sqe->flags = wqe->flags;
  1193. sqe->wqe_size = wqe_size16 +
  1194. ((offsetof(typeof(*sqe), data) + 15) >> 4);
  1195. sqe->inv_key_or_imm_data = cpu_to_le32(
  1196. wqe->send.inv_key);
  1197. if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
			(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
				SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
				SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
				SQ_FR_PMR_NUMLEVELS_MASK);
		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		swq->psn_search->opcode_start_psn = cpu_to_le32(
			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			 SQ_PSN_SEARCH_START_PSN_MASK) |
			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			 SQ_PSN_SEARCH_OPCODE_MASK));
		swq->psn_search->flags_next_psn = cpu_to_le32(
			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
	}
queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate SQ nq_work!");
			rc = -ENOMEM;
		}
	}
	return rc;
}
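
/* Ring the RQ doorbell with the current producer index so the HW picks up
 * newly posted receive WQEs.
 */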
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_RQ);
	/* Flush the writes to the HW Rx WQE before ringing the Rx DB */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}
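
/* Post one receive WQE to the QP's RQ. If the QP is already in the error
 * state, only the wr_id is recorded and a completion is scheduled through
 * the NQ worker so the ULP sees the flush completion via poll_cq.
 */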
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&rq->hwq.pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n",
			__func__);
		goto queue_err;
	}
	if (bnxt_qplib_queue_full(rq)) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires the wqe size to have room for at least one SGE even if
	 * none was supplied by the ULP
	 */
	if (!wqe->num_sge)
		rqe->wqe_size++;

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		rq->swq[sw_prod].wr_id = wqe->wr_id;
	}

	rq->hwq.prod++;
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&rq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate RQ nq_work!");
			rc = -ENOMEM;
		}
	}
done:
	return rc;
}

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	struct dbr_dbr db_msg = { 0 };

	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_CQ_ARMENA);
	/* Flush memory writes before enabling the CQ */
	wmb();
	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
}
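
/* Ring the CQ doorbell with the requested arm type at the current consumer
 * index; callers are expected to hold cq->hwq.lock.
 */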
static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_cons;

	/* Ring DB */
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    arm_type);
	/* Flush memory writes before arming the CQ */
	wmb();
	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}
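
/* Allocate the CQE ring in host memory, issue CREATE_CQ to the firmware and
 * enable CQ arming via the ARMENA doorbell.
 */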
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_cq req;
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc;

	cq->hwq.max_elements = cq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
				       cq->nmap, &cq->hwq.max_elements,
				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);

	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le32(
	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
						CMDQ_CREATE_CQ_LVL_SFT) |
	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));

	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);

	bnxt_qplib_arm_cq_enable(cq);
	return 0;

fail:
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
exit:
	return rc;
}
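
/* Issue DESTROY_CQ to the firmware and free the host CQE ring. */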
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
	return 0;
}
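
/* Complete all outstanding SQEs with FLUSHED_ERR, skipping fence WQEs,
 * until the SQ is drained or the caller's budget runs out.
 */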
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	u32 sw_prod, sw_cons;
	struct bnxt_qplib_cqe *cqe;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod)
			break;

		/* Skip the FENCE WQE completions */
		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		(*budget)--;
skip_compl:
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
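
/* Complete all outstanding RQEs with FLUSHED_ERR, using the RES opcode that
 * matches the QP type, until the RQ is drained or the budget runs out.
 */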
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 sw_prod, sw_cons;
	int rc = 0;
	int opcode = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
		if (sw_cons == sw_prod)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[sw_cons].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
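
/* Move the QP to the error state, cancel any pending phantom-CQE processing
 * and add the QP to the CQ flush lists.
 */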
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);

	/* Add qp to flush list of the CQ */
	__bnxt_qplib_add_flush_qp(qp);
}

/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
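/* WA 9060: before completing a signaled SWQE, check whether its psn_search
 * entry is still marked (bit 31 of flags_next_psn). If it is, stop
 * completing, re-arm the CQ and keep peeking ahead until the fence
 * ("phantom") REQ CQE for this SQ shows up in the CQ ring.
 */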
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[sw_sq_cons];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);

		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
						     [CQE_IDX(peek_sw_cq_cons)];
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
						peek_req_hwcqe->sq_cons_idx) - 1
						, &sq->hwq);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP:Got Phantom CQE");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
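
/* Process a REQ (send-side) CQE. Since the HW may aggregate completions,
 * fabricate CQEs for every SWQE from the current SQ consumer up to the
 * index reported by the hardware CQE, applying the WA 9060 check first.
 */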
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_sq_cons, cqe_sq_cons;
	struct bnxt_qplib_swq *swq;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: Process Req qp is NULL");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
	if (cqe_sq_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process req reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_sq_cons, sq->hwq.max_elements);
		return -EINVAL;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation, from the current sq cons
	 * up to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_sq_cons == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sw_sq_cons];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Processed Req ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
				sw_sq_cons, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_lock_buddy_cq(qp, cq);
			bnxt_qplib_mark_qp_error(qp);
			bnxt_qplib_unlock_buddy_cq(qp, cq);
		} else {
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				/* Before we complete, do WA 9060 */
				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		sq->hwq.cons++;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
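
/* Process an RC responder (RES_RC) CQE for a receive completion; on a bad
 * status the QP is added to the CQ flush list.
 */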
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}
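
/* Process a UD responder (RES_UD) CQE, recovering the source QP number and
 * source MAC reported by the hardware.
 */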
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	memcpy(cqe->smac, hwcqe->src_mac, 6);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
		      hwcqe->src_qp_high_srq_or_rq_wr_id) &
		       CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx %#x exceeded RQ max %#x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}
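
/* Return true if no valid CQE is pending at the current consumer index. */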
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	unsigned long flags;
	u32 sw_cons, raw_cons;
	bool rc = true;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	spin_unlock_irqrestore(&cq->hwq.lock, flags);

	return rc;
}
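
/* Process a RAWETH/QP1 responder CQE (GSI traffic), copying the raweth
 * flags and per-packet metadata into the qplib CQE.
 */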
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: process_cq Raw/QP1 qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);

	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: ix 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}
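
/* Process a TERMINAL CQE: move the QP to the error state, complete any
 * aggregated successful SQEs up to the reported SQ consumer index and queue
 * the QP for RQ flushing.
 */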
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal qp is NULL");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;

	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_cons, sq->hwq.max_elements);
		goto do_rq;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[sw_cons].wr_id;
			cqe->type = sq->swq[sw_cons].type;
			cqe++;
			(*budget)--;
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && sw_cons != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Processed terminal ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
			cqe_cons, rq->hwq.max_elements);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod, regardless of what
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_lock_buddy_cq(qp, cq);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_unlock_buddy_cq(qp, cq);
done:
	return rc;
}
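
/* A CUTOFF CQE arrives as part of a CQ resize operation; clear the
 * resize-in-progress flag and wake up the waiter.
 */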
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
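
/* Walk the CQ's SQ and RQ flush lists and fabricate flush completions for
 * every QP queued on them, up to num_cqes entries.
 */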
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing SQ QP= %p",
			qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing RQ QP= %p",
			qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->hwq.lock, flags);

	return num_cqes - budget;
}
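
/* Poll up to num_cqes completions, dispatching each valid hardware CQE to
 * its type-specific handler and ringing the CQ doorbell with the new
 * consumer index. Returns the number of CQEs filled in.
 */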
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	unsigned long flags;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/* From the device's respective CQE format to qplib_wc */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cq unknown type 0x%lx",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cqe error rc = 0x%x", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
	}
exit:
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
	return num_cqes - budget;
}
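
/* Arm the CQ for notification (if an arm type is given) and record the arm
 * state so the NQ handler knows whether to invoke the CQ handler.
 */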
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	if (arm_type)
		bnxt_qplib_arm_cq(cq, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
}