qplib_fp.c

  1. /*
  2. * Broadcom NetXtreme-E RoCE driver.
  3. *
  4. * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
  5. * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. *
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35. *
  36. * Description: Fast Path Operators
  37. */
  38. #include <linux/interrupt.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/sched.h>
  41. #include <linux/slab.h>
  42. #include <linux/pci.h>
  43. #include <linux/prefetch.h>
  44. #include "roce_hsi.h"
  45. #include "qplib_res.h"
  46. #include "qplib_rcfw.h"
  47. #include "qplib_sp.h"
  48. #include "qplib_fp.h"
  49. static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
  50. static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
  51. static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
  52. {
  53. qp->sq.condition = false;
  54. qp->sq.send_phantom = false;
  55. qp->sq.single = false;
  56. }
  57. /* Flush list */
  58. static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  59. {
  60. struct bnxt_qplib_cq *scq, *rcq;
  61. scq = qp->scq;
  62. rcq = qp->rcq;
  63. if (!qp->sq.flushed) {
  64. dev_dbg(&scq->hwq.pdev->dev,
  65. "QPLIB: FP: Adding to SQ Flush list = %p",
  66. qp);
  67. bnxt_qplib_cancel_phantom_processing(qp);
  68. list_add_tail(&qp->sq_flush, &scq->sqf_head);
  69. qp->sq.flushed = true;
  70. }
  71. if (!qp->srq) {
  72. if (!qp->rq.flushed) {
  73. dev_dbg(&rcq->hwq.pdev->dev,
  74. "QPLIB: FP: Adding to RQ Flush list = %p",
  75. qp);
  76. list_add_tail(&qp->rq_flush, &rcq->rqf_head);
  77. qp->rq.flushed = true;
  78. }
  79. }
  80. }
  81. void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
  82. unsigned long *flags)
  83. __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
  84. {
  85. spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
  86. if (qp->scq == qp->rcq)
  87. __acquire(&qp->rcq->hwq.lock);
  88. else
  89. spin_lock(&qp->rcq->hwq.lock);
  90. }
  91. void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
  92. unsigned long *flags)
  93. __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
  94. {
  95. if (qp->scq == qp->rcq)
  96. __release(&qp->rcq->hwq.lock);
  97. else
  98. spin_unlock(&qp->rcq->hwq.lock);
  99. spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
  100. }
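/*
 * Usage sketch (illustrative only, not part of this driver): code that must
 * touch both CQs of a QP takes them through the pair above, which handles
 * the scq == rcq case without double-locking. "qp" stands for any valid
 * bnxt_qplib_qp owned by the caller.
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_locks(qp, &flags);
 *	... work on qp->scq and qp->rcq ...
 *	bnxt_qplib_release_cq_locks(qp, &flags);
 */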
  101. static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
  102. struct bnxt_qplib_cq *cq)
  103. {
  104. struct bnxt_qplib_cq *buddy_cq = NULL;
  105. if (qp->scq == qp->rcq)
  106. buddy_cq = NULL;
  107. else if (qp->scq == cq)
  108. buddy_cq = qp->rcq;
  109. else
  110. buddy_cq = qp->scq;
  111. return buddy_cq;
  112. }
  113. static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
  114. struct bnxt_qplib_cq *cq)
  115. __acquires(&buddy_cq->hwq.lock)
  116. {
  117. struct bnxt_qplib_cq *buddy_cq = NULL;
  118. buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
  119. if (!buddy_cq)
  120. __acquire(&cq->hwq.lock);
  121. else
  122. spin_lock(&buddy_cq->hwq.lock);
  123. }
  124. static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
  125. struct bnxt_qplib_cq *cq)
  126. __releases(&buddy_cq->hwq.lock)
  127. {
  128. struct bnxt_qplib_cq *buddy_cq = NULL;
  129. buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
  130. if (!buddy_cq)
  131. __release(&cq->hwq.lock);
  132. else
  133. spin_unlock(&buddy_cq->hwq.lock);
  134. }
  135. void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  136. {
  137. unsigned long flags;
  138. bnxt_qplib_acquire_cq_locks(qp, &flags);
  139. __bnxt_qplib_add_flush_qp(qp);
  140. bnxt_qplib_release_cq_locks(qp, &flags);
  141. }
  142. static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
  143. {
  144. if (qp->sq.flushed) {
  145. qp->sq.flushed = false;
  146. list_del(&qp->sq_flush);
  147. }
  148. if (!qp->srq) {
  149. if (qp->rq.flushed) {
  150. qp->rq.flushed = false;
  151. list_del(&qp->rq_flush);
  152. }
  153. }
  154. }
  155. void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
  156. {
  157. unsigned long flags;
  158. bnxt_qplib_acquire_cq_locks(qp, &flags);
  159. __clean_cq(qp->scq, (u64)(unsigned long)qp);
  160. qp->sq.hwq.prod = 0;
  161. qp->sq.hwq.cons = 0;
  162. __clean_cq(qp->rcq, (u64)(unsigned long)qp);
  163. qp->rq.hwq.prod = 0;
  164. qp->rq.hwq.cons = 0;
  165. __bnxt_qplib_del_flush_qp(qp);
  166. bnxt_qplib_release_cq_locks(qp, &flags);
  167. }
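/*
 * Flush-list sketch (illustrative only, not part of this driver):
 * bnxt_qplib_add_flush_qp() queues the QP on its CQs' flush lists so pending
 * work can be completed in the error path; bnxt_qplib_del_flush_qp() cleans
 * both CQs, resets the SQ/RQ indices and takes the QP off the lists again.
 * Both helpers take the CQ locks internally, so a caller only passes the QP.
 *
 *	bnxt_qplib_add_flush_qp(qp);
 *	...
 *	bnxt_qplib_del_flush_qp(qp);
 */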
  168. static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
  169. {
  170. struct bnxt_qplib_nq_work *nq_work =
  171. container_of(work, struct bnxt_qplib_nq_work, work);
  172. struct bnxt_qplib_cq *cq = nq_work->cq;
  173. struct bnxt_qplib_nq *nq = nq_work->nq;
  174. if (cq && nq) {
  175. spin_lock_bh(&cq->compl_lock);
  176. if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
  177. dev_dbg(&nq->pdev->dev,
  178. "%s:Trigger cq = %p event nq = %p\n",
  179. __func__, cq, nq);
  180. nq->cqn_handler(nq, cq);
  181. }
  182. spin_unlock_bh(&cq->compl_lock);
  183. }
  184. kfree(nq_work);
  185. }
  186. static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
  187. struct bnxt_qplib_qp *qp)
  188. {
  189. struct bnxt_qplib_q *rq = &qp->rq;
  190. struct bnxt_qplib_q *sq = &qp->sq;
  191. if (qp->rq_hdr_buf)
  192. dma_free_coherent(&res->pdev->dev,
  193. rq->hwq.max_elements * qp->rq_hdr_buf_size,
  194. qp->rq_hdr_buf, qp->rq_hdr_buf_map);
  195. if (qp->sq_hdr_buf)
  196. dma_free_coherent(&res->pdev->dev,
  197. sq->hwq.max_elements * qp->sq_hdr_buf_size,
  198. qp->sq_hdr_buf, qp->sq_hdr_buf_map);
  199. qp->rq_hdr_buf = NULL;
  200. qp->sq_hdr_buf = NULL;
  201. qp->rq_hdr_buf_map = 0;
  202. qp->sq_hdr_buf_map = 0;
  203. qp->sq_hdr_buf_size = 0;
  204. qp->rq_hdr_buf_size = 0;
  205. }
  206. static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
  207. struct bnxt_qplib_qp *qp)
  208. {
  209. struct bnxt_qplib_q *rq = &qp->rq;
   210. struct bnxt_qplib_q *sq = &qp->sq;
  211. int rc = 0;
  212. if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
  213. qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
  214. sq->hwq.max_elements *
  215. qp->sq_hdr_buf_size,
  216. &qp->sq_hdr_buf_map, GFP_KERNEL);
  217. if (!qp->sq_hdr_buf) {
  218. rc = -ENOMEM;
  219. dev_err(&res->pdev->dev,
  220. "QPLIB: Failed to create sq_hdr_buf");
  221. goto fail;
  222. }
  223. }
  224. if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
  225. qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
  226. rq->hwq.max_elements *
  227. qp->rq_hdr_buf_size,
  228. &qp->rq_hdr_buf_map,
  229. GFP_KERNEL);
  230. if (!qp->rq_hdr_buf) {
  231. rc = -ENOMEM;
  232. dev_err(&res->pdev->dev,
  233. "QPLIB: Failed to create rq_hdr_buf");
  234. goto fail;
  235. }
  236. }
  237. return 0;
  238. fail:
  239. bnxt_qplib_free_qp_hdr_buf(res, qp);
  240. return rc;
  241. }
  242. static void bnxt_qplib_service_nq(unsigned long data)
  243. {
  244. struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
  245. struct bnxt_qplib_hwq *hwq = &nq->hwq;
  246. struct nq_base *nqe, **nq_ptr;
  247. struct bnxt_qplib_cq *cq;
  248. int num_cqne_processed = 0;
  249. u32 sw_cons, raw_cons;
  250. u16 type;
  251. int budget = nq->budget;
  252. u64 q_handle;
  253. /* Service the NQ until empty */
  254. raw_cons = hwq->cons;
  255. while (budget--) {
  256. sw_cons = HWQ_CMP(raw_cons, hwq);
  257. nq_ptr = (struct nq_base **)hwq->pbl_ptr;
  258. nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
  259. if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
  260. break;
  261. /*
  262. * The valid test of the entry must be done first before
  263. * reading any further.
  264. */
  265. dma_rmb();
  266. type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
  267. switch (type) {
  268. case NQ_BASE_TYPE_CQ_NOTIFICATION:
  269. {
  270. struct nq_cn *nqcne = (struct nq_cn *)nqe;
  271. q_handle = le32_to_cpu(nqcne->cq_handle_low);
  272. q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
  273. << 32;
  274. cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
  275. bnxt_qplib_arm_cq_enable(cq);
  276. spin_lock_bh(&cq->compl_lock);
  277. atomic_set(&cq->arm_state, 0);
  278. if (!nq->cqn_handler(nq, (cq)))
  279. num_cqne_processed++;
  280. else
  281. dev_warn(&nq->pdev->dev,
  282. "QPLIB: cqn - type 0x%x not handled",
  283. type);
  284. spin_unlock_bh(&cq->compl_lock);
  285. break;
  286. }
  287. case NQ_BASE_TYPE_DBQ_EVENT:
  288. break;
  289. default:
  290. dev_warn(&nq->pdev->dev,
  291. "QPLIB: nqe with type = 0x%x not handled",
  292. type);
  293. break;
  294. }
  295. raw_cons++;
  296. }
  297. if (hwq->cons != raw_cons) {
  298. hwq->cons = raw_cons;
  299. NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
  300. }
  301. }
  302. static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
  303. {
  304. struct bnxt_qplib_nq *nq = dev_instance;
  305. struct bnxt_qplib_hwq *hwq = &nq->hwq;
  306. struct nq_base **nq_ptr;
  307. u32 sw_cons;
  308. /* Prefetch the NQ element */
  309. sw_cons = HWQ_CMP(hwq->cons, hwq);
  310. nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
  311. prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
  312. /* Fan out to CPU affinitized kthreads? */
  313. tasklet_schedule(&nq->worker);
  314. return IRQ_HANDLED;
  315. }
  316. void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
  317. {
  318. if (nq->cqn_wq) {
  319. destroy_workqueue(nq->cqn_wq);
  320. nq->cqn_wq = NULL;
  321. }
  322. /* Make sure the HW is stopped! */
  323. synchronize_irq(nq->vector);
  324. tasklet_disable(&nq->worker);
  325. tasklet_kill(&nq->worker);
  326. if (nq->requested) {
  327. irq_set_affinity_hint(nq->vector, NULL);
  328. free_irq(nq->vector, nq);
  329. nq->requested = false;
  330. }
  331. if (nq->bar_reg_iomem)
  332. iounmap(nq->bar_reg_iomem);
  333. nq->bar_reg_iomem = NULL;
  334. nq->cqn_handler = NULL;
  335. nq->srqn_handler = NULL;
  336. nq->vector = 0;
  337. }
  338. int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
  339. int nq_idx, int msix_vector, int bar_reg_offset,
  340. int (*cqn_handler)(struct bnxt_qplib_nq *nq,
  341. struct bnxt_qplib_cq *),
  342. int (*srqn_handler)(struct bnxt_qplib_nq *nq,
  343. void *, u8 event))
  344. {
  345. resource_size_t nq_base;
  346. int rc = -1;
  347. nq->pdev = pdev;
  348. nq->vector = msix_vector;
  349. nq->cqn_handler = cqn_handler;
  350. nq->srqn_handler = srqn_handler;
  351. tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
  352. /* Have a task to schedule CQ notifiers in post send case */
  353. nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
  354. if (!nq->cqn_wq)
  355. goto fail;
  356. nq->requested = false;
  357. memset(nq->name, 0, 32);
  358. sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
  359. rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
  360. if (rc) {
  361. dev_err(&nq->pdev->dev,
  362. "Failed to request IRQ for NQ: %#x", rc);
  363. bnxt_qplib_disable_nq(nq);
  364. goto fail;
  365. }
  366. cpumask_clear(&nq->mask);
  367. cpumask_set_cpu(nq_idx, &nq->mask);
  368. rc = irq_set_affinity_hint(nq->vector, &nq->mask);
  369. if (rc) {
  370. dev_warn(&nq->pdev->dev,
  371. "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
  372. nq->vector, nq_idx);
  373. }
  374. nq->requested = true;
  375. nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
  376. nq->bar_reg_off = bar_reg_offset;
  377. nq_base = pci_resource_start(pdev, nq->bar_reg);
  378. if (!nq_base) {
  379. rc = -ENOMEM;
  380. goto fail;
  381. }
  382. nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
  383. if (!nq->bar_reg_iomem) {
  384. rc = -ENOMEM;
  385. goto fail;
  386. }
  387. NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
  388. return 0;
  389. fail:
  390. bnxt_qplib_disable_nq(nq);
  391. return rc;
  392. }
  393. void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
  394. {
  395. if (nq->hwq.max_elements) {
  396. bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
  397. nq->hwq.max_elements = 0;
  398. }
  399. }
  400. int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
  401. {
  402. nq->pdev = pdev;
  403. if (!nq->hwq.max_elements ||
  404. nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
  405. nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
  406. if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
  407. &nq->hwq.max_elements,
  408. BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
  409. PAGE_SIZE, HWQ_TYPE_L2_CMPL))
  410. return -ENOMEM;
  411. nq->budget = 8;
  412. return 0;
  413. }
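/*
 * Lifecycle sketch (illustrative only, not part of this driver): the NQ is
 * sized and allocated first, then wired to its MSI-X vector and doorbell;
 * teardown reverses the order. "my_cqn_handler", "my_srqn_handler",
 * "msix_vec" and "bar_off" are placeholder names, not driver symbols.
 *
 *	rc = bnxt_qplib_alloc_nq(pdev, nq);
 *	if (!rc)
 *		rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vec, bar_off,
 *					  my_cqn_handler, my_srqn_handler);
 *	...
 *	bnxt_qplib_disable_nq(nq);
 *	bnxt_qplib_free_nq(nq);
 */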
  414. /* QP */
  415. int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  416. {
  417. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  418. struct cmdq_create_qp1 req;
  419. struct creq_create_qp1_resp resp;
  420. struct bnxt_qplib_pbl *pbl;
  421. struct bnxt_qplib_q *sq = &qp->sq;
  422. struct bnxt_qplib_q *rq = &qp->rq;
  423. int rc;
  424. u16 cmd_flags = 0;
  425. u32 qp_flags = 0;
  426. RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
  427. /* General */
  428. req.type = qp->type;
  429. req.dpi = cpu_to_le32(qp->dpi->dpi);
  430. req.qp_handle = cpu_to_le64(qp->qp_handle);
  431. /* SQ */
  432. sq->hwq.max_elements = sq->max_wqe;
  433. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
  434. &sq->hwq.max_elements,
  435. BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
  436. PAGE_SIZE, HWQ_TYPE_QUEUE);
  437. if (rc)
  438. goto exit;
  439. sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
  440. if (!sq->swq) {
  441. rc = -ENOMEM;
  442. goto fail_sq;
  443. }
  444. pbl = &sq->hwq.pbl[PBL_LVL_0];
  445. req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  446. req.sq_pg_size_sq_lvl =
  447. ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
  448. << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
  449. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  450. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
  451. pbl->pg_size == ROCE_PG_SIZE_8K ?
  452. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
  453. pbl->pg_size == ROCE_PG_SIZE_64K ?
  454. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
  455. pbl->pg_size == ROCE_PG_SIZE_2M ?
  456. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
  457. pbl->pg_size == ROCE_PG_SIZE_8M ?
  458. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
  459. pbl->pg_size == ROCE_PG_SIZE_1G ?
  460. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
  461. CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
  462. if (qp->scq)
  463. req.scq_cid = cpu_to_le32(qp->scq->id);
  464. qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
  465. /* RQ */
  466. if (rq->max_wqe) {
  467. rq->hwq.max_elements = qp->rq.max_wqe;
  468. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
  469. &rq->hwq.max_elements,
  470. BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
  471. PAGE_SIZE, HWQ_TYPE_QUEUE);
  472. if (rc)
  473. goto fail_sq;
  474. rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
  475. GFP_KERNEL);
  476. if (!rq->swq) {
  477. rc = -ENOMEM;
  478. goto fail_rq;
  479. }
  480. pbl = &rq->hwq.pbl[PBL_LVL_0];
  481. req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  482. req.rq_pg_size_rq_lvl =
  483. ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
  484. CMDQ_CREATE_QP1_RQ_LVL_SFT) |
  485. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  486. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
  487. pbl->pg_size == ROCE_PG_SIZE_8K ?
  488. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
  489. pbl->pg_size == ROCE_PG_SIZE_64K ?
  490. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
  491. pbl->pg_size == ROCE_PG_SIZE_2M ?
  492. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
  493. pbl->pg_size == ROCE_PG_SIZE_8M ?
  494. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
  495. pbl->pg_size == ROCE_PG_SIZE_1G ?
  496. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
  497. CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
  498. if (qp->rcq)
  499. req.rcq_cid = cpu_to_le32(qp->rcq->id);
  500. }
   501. /* Header buffer - allow hdr_buf to be passed in */
  502. rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
  503. if (rc) {
  504. rc = -ENOMEM;
  505. goto fail;
  506. }
  507. req.qp_flags = cpu_to_le32(qp_flags);
  508. req.sq_size = cpu_to_le32(sq->hwq.max_elements);
  509. req.rq_size = cpu_to_le32(rq->hwq.max_elements);
  510. req.sq_fwo_sq_sge =
  511. cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
  512. CMDQ_CREATE_QP1_SQ_SGE_SFT);
  513. req.rq_fwo_rq_sge =
  514. cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
  515. CMDQ_CREATE_QP1_RQ_SGE_SFT);
  516. req.pd_id = cpu_to_le32(qp->pd->id);
  517. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  518. (void *)&resp, NULL, 0);
  519. if (rc)
  520. goto fail;
  521. qp->id = le32_to_cpu(resp.xid);
  522. qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
  523. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  524. rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
  525. return 0;
  526. fail:
  527. bnxt_qplib_free_qp_hdr_buf(res, qp);
  528. fail_rq:
  529. bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
  530. kfree(rq->swq);
  531. fail_sq:
  532. bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
  533. kfree(sq->swq);
  534. exit:
  535. return rc;
  536. }
  537. int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  538. {
  539. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  540. struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
  541. struct cmdq_create_qp req;
  542. struct creq_create_qp_resp resp;
  543. struct bnxt_qplib_pbl *pbl;
  544. struct sq_psn_search **psn_search_ptr;
  545. unsigned long int psn_search, poff = 0;
  546. struct bnxt_qplib_q *sq = &qp->sq;
  547. struct bnxt_qplib_q *rq = &qp->rq;
  548. struct bnxt_qplib_hwq *xrrq;
  549. int i, rc, req_size, psn_sz;
  550. u16 cmd_flags = 0, max_ssge;
  551. u32 sw_prod, qp_flags = 0;
  552. RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
  553. /* General */
  554. req.type = qp->type;
  555. req.dpi = cpu_to_le32(qp->dpi->dpi);
  556. req.qp_handle = cpu_to_le64(qp->qp_handle);
  557. /* SQ */
  558. psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
  559. sizeof(struct sq_psn_search) : 0;
  560. sq->hwq.max_elements = sq->max_wqe;
  561. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
  562. sq->nmap, &sq->hwq.max_elements,
  563. BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
  564. psn_sz,
  565. PAGE_SIZE, HWQ_TYPE_QUEUE);
  566. if (rc)
  567. goto exit;
  568. sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
  569. if (!sq->swq) {
  570. rc = -ENOMEM;
  571. goto fail_sq;
  572. }
  573. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  574. if (psn_sz) {
  575. psn_search_ptr = (struct sq_psn_search **)
  576. &hw_sq_send_ptr[get_sqe_pg
  577. (sq->hwq.max_elements)];
  578. psn_search = (unsigned long int)
  579. &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
  580. [get_sqe_idx(sq->hwq.max_elements)];
  581. if (psn_search & ~PAGE_MASK) {
  582. /* If the psn_search does not start on a page boundary,
  583. * then calculate the offset
  584. */
  585. poff = (psn_search & ~PAGE_MASK) /
  586. BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
  587. }
  588. for (i = 0; i < sq->hwq.max_elements; i++)
  589. sq->swq[i].psn_search =
  590. &psn_search_ptr[get_psne_pg(i + poff)]
  591. [get_psne_idx(i + poff)];
  592. }
  593. pbl = &sq->hwq.pbl[PBL_LVL_0];
  594. req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  595. req.sq_pg_size_sq_lvl =
  596. ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
  597. << CMDQ_CREATE_QP_SQ_LVL_SFT) |
  598. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  599. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
  600. pbl->pg_size == ROCE_PG_SIZE_8K ?
  601. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
  602. pbl->pg_size == ROCE_PG_SIZE_64K ?
  603. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
  604. pbl->pg_size == ROCE_PG_SIZE_2M ?
  605. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
  606. pbl->pg_size == ROCE_PG_SIZE_8M ?
  607. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
  608. pbl->pg_size == ROCE_PG_SIZE_1G ?
  609. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
  610. CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
  611. /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
  612. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  613. for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
  614. hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
  615. [get_sqe_idx(sw_prod)];
  616. hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
  617. }
  618. if (qp->scq)
  619. req.scq_cid = cpu_to_le32(qp->scq->id);
  620. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
  621. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
  622. if (qp->sig_type)
  623. qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
  624. /* RQ */
  625. if (rq->max_wqe) {
  626. rq->hwq.max_elements = rq->max_wqe;
  627. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
  628. rq->nmap, &rq->hwq.max_elements,
  629. BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
  630. PAGE_SIZE, HWQ_TYPE_QUEUE);
  631. if (rc)
  632. goto fail_sq;
  633. rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
  634. GFP_KERNEL);
  635. if (!rq->swq) {
  636. rc = -ENOMEM;
  637. goto fail_rq;
  638. }
  639. pbl = &rq->hwq.pbl[PBL_LVL_0];
  640. req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
  641. req.rq_pg_size_rq_lvl =
  642. ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
  643. CMDQ_CREATE_QP_RQ_LVL_SFT) |
  644. (pbl->pg_size == ROCE_PG_SIZE_4K ?
  645. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
  646. pbl->pg_size == ROCE_PG_SIZE_8K ?
  647. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
  648. pbl->pg_size == ROCE_PG_SIZE_64K ?
  649. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
  650. pbl->pg_size == ROCE_PG_SIZE_2M ?
  651. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
  652. pbl->pg_size == ROCE_PG_SIZE_8M ?
  653. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
  654. pbl->pg_size == ROCE_PG_SIZE_1G ?
  655. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
  656. CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
  657. }
  658. if (qp->rcq)
  659. req.rcq_cid = cpu_to_le32(qp->rcq->id);
  660. req.qp_flags = cpu_to_le32(qp_flags);
  661. req.sq_size = cpu_to_le32(sq->hwq.max_elements);
  662. req.rq_size = cpu_to_le32(rq->hwq.max_elements);
  663. qp->sq_hdr_buf = NULL;
  664. qp->rq_hdr_buf = NULL;
  665. rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
  666. if (rc)
  667. goto fail_rq;
  668. /* CTRL-22434: Irrespective of the requested SGE count on the SQ
  669. * always create the QP with max send sges possible if the requested
  670. * inline size is greater than 0.
  671. */
  672. max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
  673. req.sq_fwo_sq_sge = cpu_to_le16(
  674. ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
  675. << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
  676. req.rq_fwo_rq_sge = cpu_to_le16(
  677. ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
  678. << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
  679. /* ORRQ and IRRQ */
  680. if (psn_sz) {
  681. xrrq = &qp->orrq;
  682. xrrq->max_elements =
  683. ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
  684. req_size = xrrq->max_elements *
  685. BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
  686. req_size &= ~(PAGE_SIZE - 1);
  687. rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
  688. &xrrq->max_elements,
  689. BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
  690. 0, req_size, HWQ_TYPE_CTX);
  691. if (rc)
  692. goto fail_buf_free;
  693. pbl = &xrrq->pbl[PBL_LVL_0];
  694. req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
  695. xrrq = &qp->irrq;
  696. xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
  697. qp->max_dest_rd_atomic);
  698. req_size = xrrq->max_elements *
  699. BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
  700. req_size &= ~(PAGE_SIZE - 1);
  701. rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
  702. &xrrq->max_elements,
  703. BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
  704. 0, req_size, HWQ_TYPE_CTX);
  705. if (rc)
  706. goto fail_orrq;
  707. pbl = &xrrq->pbl[PBL_LVL_0];
  708. req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
  709. }
  710. req.pd_id = cpu_to_le32(qp->pd->id);
  711. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  712. (void *)&resp, NULL, 0);
  713. if (rc)
  714. goto fail;
  715. qp->id = le32_to_cpu(resp.xid);
  716. qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
  717. INIT_LIST_HEAD(&qp->sq_flush);
  718. INIT_LIST_HEAD(&qp->rq_flush);
  719. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  720. rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
  721. return 0;
  722. fail:
  723. if (qp->irrq.max_elements)
  724. bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
  725. fail_orrq:
  726. if (qp->orrq.max_elements)
  727. bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
  728. fail_buf_free:
  729. bnxt_qplib_free_qp_hdr_buf(res, qp);
  730. fail_rq:
  731. bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
  732. kfree(rq->swq);
  733. fail_sq:
  734. bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
  735. kfree(sq->swq);
  736. exit:
  737. return rc;
  738. }
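/*
 * Lifecycle sketch (illustrative only, not part of this driver): a QP built
 * by bnxt_qplib_create_qp() is torn down with bnxt_qplib_destroy_qp(), which
 * sends DESTROY_QP to the firmware, cleans both CQs of stale references and
 * frees the SQ/RQ/IRRQ/ORRQ memory. "res" and "qp" are the same objects that
 * were passed to create.
 *
 *	rc = bnxt_qplib_create_qp(res, qp);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = bnxt_qplib_destroy_qp(res, qp);
 */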
  739. static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
  740. {
  741. switch (qp->state) {
  742. case CMDQ_MODIFY_QP_NEW_STATE_RTR:
  743. /* INIT->RTR, configure the path_mtu to the default
  744. * 2048 if not being requested
  745. */
  746. if (!(qp->modify_flags &
  747. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
  748. qp->modify_flags |=
  749. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  750. qp->path_mtu =
  751. CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
  752. }
  753. qp->modify_flags &=
  754. ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
   755. /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
  756. if (qp->max_dest_rd_atomic < 1)
  757. qp->max_dest_rd_atomic = 1;
  758. qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
  759. /* Bono FW 20.6.5 requires SGID_INDEX configuration */
  760. if (!(qp->modify_flags &
  761. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
  762. qp->modify_flags |=
  763. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
  764. qp->ah.sgid_index = 0;
  765. }
  766. break;
  767. default:
  768. break;
  769. }
  770. }
  771. static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
  772. {
  773. switch (qp->state) {
  774. case CMDQ_MODIFY_QP_NEW_STATE_RTS:
  775. /* Bono FW requires the max_rd_atomic to be >= 1 */
  776. if (qp->max_rd_atomic < 1)
  777. qp->max_rd_atomic = 1;
  778. /* Bono FW does not allow PKEY_INDEX,
  779. * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
  780. * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
  781. * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
  782. * modification
  783. */
  784. qp->modify_flags &=
  785. ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
  786. CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
  787. CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
  788. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
  789. CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
  790. CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
  791. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
  792. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
  793. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
  794. CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
  795. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
  796. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
  797. break;
  798. default:
  799. break;
  800. }
  801. }
  802. static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
  803. {
  804. switch (qp->cur_qp_state) {
  805. case CMDQ_MODIFY_QP_NEW_STATE_RESET:
  806. break;
  807. case CMDQ_MODIFY_QP_NEW_STATE_INIT:
  808. __modify_flags_from_init_state(qp);
  809. break;
  810. case CMDQ_MODIFY_QP_NEW_STATE_RTR:
  811. __modify_flags_from_rtr_state(qp);
  812. break;
  813. case CMDQ_MODIFY_QP_NEW_STATE_RTS:
  814. break;
  815. case CMDQ_MODIFY_QP_NEW_STATE_SQD:
  816. break;
  817. case CMDQ_MODIFY_QP_NEW_STATE_SQE:
  818. break;
  819. case CMDQ_MODIFY_QP_NEW_STATE_ERR:
  820. break;
  821. default:
  822. break;
  823. }
  824. }
  825. int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  826. {
  827. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  828. struct cmdq_modify_qp req;
  829. struct creq_modify_qp_resp resp;
  830. u16 cmd_flags = 0, pkey;
  831. u32 temp32[4];
  832. u32 bmask;
  833. int rc;
  834. RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
  835. /* Filter out the qp_attr_mask based on the state->new transition */
  836. __filter_modify_flags(qp);
  837. bmask = qp->modify_flags;
  838. req.modify_mask = cpu_to_le32(qp->modify_flags);
  839. req.qp_cid = cpu_to_le32(qp->id);
  840. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
  841. req.network_type_en_sqd_async_notify_new_state =
  842. (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
  843. (qp->en_sqd_async_notify ?
  844. CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
  845. }
  846. req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
  847. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
  848. req.access = qp->access;
  849. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
  850. if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
  851. qp->pkey_index, &pkey))
  852. req.pkey = cpu_to_le16(pkey);
  853. }
  854. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
  855. req.qkey = cpu_to_le32(qp->qkey);
  856. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
  857. memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
  858. req.dgid[0] = cpu_to_le32(temp32[0]);
  859. req.dgid[1] = cpu_to_le32(temp32[1]);
  860. req.dgid[2] = cpu_to_le32(temp32[2]);
  861. req.dgid[3] = cpu_to_le32(temp32[3]);
  862. }
  863. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
  864. req.flow_label = cpu_to_le32(qp->ah.flow_label);
  865. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
  866. req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
  867. [qp->ah.sgid_index]);
  868. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
  869. req.hop_limit = qp->ah.hop_limit;
  870. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
  871. req.traffic_class = qp->ah.traffic_class;
  872. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
  873. memcpy(req.dest_mac, qp->ah.dmac, 6);
  874. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
  875. req.path_mtu = qp->path_mtu;
  876. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
  877. req.timeout = qp->timeout;
  878. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
  879. req.retry_cnt = qp->retry_cnt;
  880. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
  881. req.rnr_retry = qp->rnr_retry;
  882. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
  883. req.min_rnr_timer = qp->min_rnr_timer;
  884. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
  885. req.rq_psn = cpu_to_le32(qp->rq.psn);
  886. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
  887. req.sq_psn = cpu_to_le32(qp->sq.psn);
  888. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
  889. req.max_rd_atomic =
  890. ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
  891. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
  892. req.max_dest_rd_atomic =
  893. IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
  894. req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
  895. req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
  896. req.sq_sge = cpu_to_le16(qp->sq.max_sge);
  897. req.rq_sge = cpu_to_le16(qp->rq.max_sge);
  898. req.max_inline_data = cpu_to_le32(qp->max_inline_data);
  899. if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
  900. req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
  901. req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
  902. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  903. (void *)&resp, NULL, 0);
  904. if (rc)
  905. return rc;
  906. qp->cur_qp_state = qp->state;
  907. return 0;
  908. }
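/*
 * Transition sketch (illustrative only, not part of this driver):
 * bnxt_qplib_modify_qp() is issued once per RESET->INIT->RTR->RTS step;
 * __filter_modify_flags() adjusts the mask for the current state, so the
 * caller only sets qp->state, qp->modify_flags and the attributes it changed.
 *
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTR;
 *	qp->modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
 *			   CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 *	rc = bnxt_qplib_modify_qp(res, qp);
 */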
  909. int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
  910. {
  911. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  912. struct cmdq_query_qp req;
  913. struct creq_query_qp_resp resp;
  914. struct bnxt_qplib_rcfw_sbuf *sbuf;
  915. struct creq_query_qp_resp_sb *sb;
  916. u16 cmd_flags = 0;
  917. u32 temp32[4];
  918. int i, rc = 0;
  919. RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
  920. sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
  921. if (!sbuf)
  922. return -ENOMEM;
  923. sb = sbuf->sb;
  924. req.qp_cid = cpu_to_le32(qp->id);
  925. req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
  926. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
  927. (void *)sbuf, 0);
  928. if (rc)
  929. goto bail;
  930. /* Extract the context from the side buffer */
  931. qp->state = sb->en_sqd_async_notify_state &
  932. CREQ_QUERY_QP_RESP_SB_STATE_MASK;
  933. qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
  934. CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
  935. true : false;
  936. qp->access = sb->access;
  937. qp->pkey_index = le16_to_cpu(sb->pkey);
  938. qp->qkey = le32_to_cpu(sb->qkey);
  939. temp32[0] = le32_to_cpu(sb->dgid[0]);
  940. temp32[1] = le32_to_cpu(sb->dgid[1]);
  941. temp32[2] = le32_to_cpu(sb->dgid[2]);
  942. temp32[3] = le32_to_cpu(sb->dgid[3]);
  943. memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
  944. qp->ah.flow_label = le32_to_cpu(sb->flow_label);
  945. qp->ah.sgid_index = 0;
  946. for (i = 0; i < res->sgid_tbl.max; i++) {
  947. if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
  948. qp->ah.sgid_index = i;
  949. break;
  950. }
  951. }
  952. if (i == res->sgid_tbl.max)
  953. dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
  954. qp->ah.hop_limit = sb->hop_limit;
  955. qp->ah.traffic_class = sb->traffic_class;
  956. memcpy(qp->ah.dmac, sb->dest_mac, 6);
  957. qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
  958. CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
  959. CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
  960. qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
  961. CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
  962. CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
  963. qp->timeout = sb->timeout;
  964. qp->retry_cnt = sb->retry_cnt;
  965. qp->rnr_retry = sb->rnr_retry;
  966. qp->min_rnr_timer = sb->min_rnr_timer;
  967. qp->rq.psn = le32_to_cpu(sb->rq_psn);
  968. qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
  969. qp->sq.psn = le32_to_cpu(sb->sq_psn);
  970. qp->max_dest_rd_atomic =
  971. IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
  972. qp->sq.max_wqe = qp->sq.hwq.max_elements;
  973. qp->rq.max_wqe = qp->rq.hwq.max_elements;
  974. qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
  975. qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
  976. qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
  977. qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
  978. memcpy(qp->smac, sb->src_mac, 6);
  979. qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
  980. bail:
  981. bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
  982. return rc;
  983. }
  984. static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
  985. {
  986. struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
  987. struct cq_base *hw_cqe, **hw_cqe_ptr;
  988. int i;
  989. for (i = 0; i < cq_hwq->max_elements; i++) {
  990. hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
  991. hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
  992. if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
  993. continue;
  994. /*
  995. * The valid test of the entry must be done first before
  996. * reading any further.
  997. */
  998. dma_rmb();
  999. switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
  1000. case CQ_BASE_CQE_TYPE_REQ:
  1001. case CQ_BASE_CQE_TYPE_TERMINAL:
  1002. {
  1003. struct cq_req *cqe = (struct cq_req *)hw_cqe;
  1004. if (qp == le64_to_cpu(cqe->qp_handle))
  1005. cqe->qp_handle = 0;
  1006. break;
  1007. }
  1008. case CQ_BASE_CQE_TYPE_RES_RC:
  1009. case CQ_BASE_CQE_TYPE_RES_UD:
  1010. case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
  1011. {
  1012. struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
  1013. if (qp == le64_to_cpu(cqe->qp_handle))
  1014. cqe->qp_handle = 0;
  1015. break;
  1016. }
  1017. default:
  1018. break;
  1019. }
  1020. }
  1021. }
  1022. int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
  1023. struct bnxt_qplib_qp *qp)
  1024. {
  1025. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  1026. struct cmdq_destroy_qp req;
  1027. struct creq_destroy_qp_resp resp;
  1028. unsigned long flags;
  1029. u16 cmd_flags = 0;
  1030. int rc;
  1031. rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
  1032. rcfw->qp_tbl[qp->id].qp_handle = NULL;
  1033. RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
  1034. req.qp_cid = cpu_to_le32(qp->id);
  1035. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  1036. (void *)&resp, NULL, 0);
  1037. if (rc) {
  1038. rcfw->qp_tbl[qp->id].qp_id = qp->id;
  1039. rcfw->qp_tbl[qp->id].qp_handle = qp;
  1040. return rc;
  1041. }
   1042. /* Must walk the associated CQs to nullify the QP ptr */
  1043. spin_lock_irqsave(&qp->scq->hwq.lock, flags);
  1044. __clean_cq(qp->scq, (u64)(unsigned long)qp);
  1045. if (qp->rcq && qp->rcq != qp->scq) {
  1046. spin_lock(&qp->rcq->hwq.lock);
  1047. __clean_cq(qp->rcq, (u64)(unsigned long)qp);
  1048. spin_unlock(&qp->rcq->hwq.lock);
  1049. }
  1050. spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
  1051. bnxt_qplib_free_qp_hdr_buf(res, qp);
  1052. bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
  1053. kfree(qp->sq.swq);
  1054. bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
  1055. kfree(qp->rq.swq);
  1056. if (qp->irrq.max_elements)
  1057. bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
  1058. if (qp->orrq.max_elements)
  1059. bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
  1060. return 0;
  1061. }
  1062. void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
  1063. struct bnxt_qplib_sge *sge)
  1064. {
  1065. struct bnxt_qplib_q *sq = &qp->sq;
  1066. u32 sw_prod;
  1067. memset(sge, 0, sizeof(*sge));
  1068. if (qp->sq_hdr_buf) {
  1069. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1070. sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
  1071. sw_prod * qp->sq_hdr_buf_size);
  1072. sge->lkey = 0xFFFFFFFF;
  1073. sge->size = qp->sq_hdr_buf_size;
  1074. return qp->sq_hdr_buf + sw_prod * sge->size;
  1075. }
  1076. return NULL;
  1077. }
  1078. u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
  1079. {
  1080. struct bnxt_qplib_q *rq = &qp->rq;
  1081. return HWQ_CMP(rq->hwq.prod, &rq->hwq);
  1082. }
  1083. dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
  1084. {
  1085. return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
  1086. }
  1087. void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
  1088. struct bnxt_qplib_sge *sge)
  1089. {
  1090. struct bnxt_qplib_q *rq = &qp->rq;
  1091. u32 sw_prod;
  1092. memset(sge, 0, sizeof(*sge));
  1093. if (qp->rq_hdr_buf) {
  1094. sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
  1095. sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
  1096. sw_prod * qp->rq_hdr_buf_size);
  1097. sge->lkey = 0xFFFFFFFF;
  1098. sge->size = qp->rq_hdr_buf_size;
  1099. return qp->rq_hdr_buf + sw_prod * sge->size;
  1100. }
  1101. return NULL;
  1102. }
  1103. void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
  1104. {
  1105. struct bnxt_qplib_q *sq = &qp->sq;
  1106. struct dbr_dbr db_msg = { 0 };
  1107. u32 sw_prod;
  1108. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1109. db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
  1110. DBR_DBR_INDEX_MASK);
  1111. db_msg.type_xid =
  1112. cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
  1113. DBR_DBR_TYPE_SQ);
  1114. /* Flush all the WQE writes to HW */
  1115. wmb();
  1116. __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
  1117. }
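/*
 * Posting sketch (illustrative only, not part of this driver): WQEs are
 * queued with bnxt_qplib_post_send() and the doorbell is rung once afterwards
 * with bnxt_qplib_post_send_db(); the wmb() above orders the WQE writes ahead
 * of the doorbell write. "wqe" is a caller-built bnxt_qplib_swqe.
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */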
  1118. int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
  1119. struct bnxt_qplib_swqe *wqe)
  1120. {
  1121. struct bnxt_qplib_q *sq = &qp->sq;
  1122. struct bnxt_qplib_swq *swq;
  1123. struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
  1124. struct sq_sge *hw_sge;
  1125. struct bnxt_qplib_nq_work *nq_work = NULL;
  1126. bool sch_handler = false;
  1127. u32 sw_prod;
  1128. u8 wqe_size16;
  1129. int i, rc = 0, data_len = 0, pkt_num = 0;
  1130. __le32 temp32;
  1131. if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
  1132. if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
  1133. sch_handler = true;
  1134. dev_dbg(&sq->hwq.pdev->dev,
  1135. "%s Error QP. Scheduling for poll_cq\n",
  1136. __func__);
  1137. goto queue_err;
  1138. }
  1139. }
  1140. if (bnxt_qplib_queue_full(sq)) {
  1141. dev_err(&sq->hwq.pdev->dev,
  1142. "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
  1143. sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
  1144. sq->q_full_delta);
  1145. rc = -ENOMEM;
  1146. goto done;
  1147. }
  1148. sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
  1149. swq = &sq->swq[sw_prod];
  1150. swq->wr_id = wqe->wr_id;
  1151. swq->type = wqe->type;
  1152. swq->flags = wqe->flags;
  1153. if (qp->sig_type)
  1154. swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
  1155. swq->start_psn = sq->psn & BTH_PSN_MASK;
  1156. hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
  1157. hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
  1158. [get_sqe_idx(sw_prod)];
  1159. memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
  1160. if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
  1161. /* Copy the inline data */
  1162. if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
  1163. dev_warn(&sq->hwq.pdev->dev,
  1164. "QPLIB: Inline data length > 96 detected");
  1165. data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
  1166. } else {
  1167. data_len = wqe->inline_len;
  1168. }
  1169. memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
  1170. wqe_size16 = (data_len + 15) >> 4;
  1171. } else {
  1172. for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
  1173. i < wqe->num_sge; i++, hw_sge++) {
  1174. hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
  1175. hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
  1176. hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
  1177. data_len += wqe->sg_list[i].size;
  1178. }
  1179. /* Each SGE entry = 1 WQE size16 */
  1180. wqe_size16 = wqe->num_sge;
   1181. /* HW requires the WQE size to have room for at least one SGE even if
   1182. * none was supplied by the ULP
  1183. */
  1184. if (!wqe->num_sge)
  1185. wqe_size16++;
  1186. }
  1187. /* Specifics */
  1188. switch (wqe->type) {
  1189. case BNXT_QPLIB_SWQE_TYPE_SEND:
  1190. if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
  1191. /* Assemble info for Raw Ethertype QPs */
  1192. struct sq_send_raweth_qp1 *sqe =
  1193. (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
  1194. sqe->wqe_type = wqe->type;
  1195. sqe->flags = wqe->flags;
  1196. sqe->wqe_size = wqe_size16 +
  1197. ((offsetof(typeof(*sqe), data) + 15) >> 4);
  1198. sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		/* fall thru */
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
			(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);
		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		swq->psn_search->opcode_start_psn = cpu_to_le32(
			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			 SQ_PSN_SEARCH_START_PSN_MASK) |
			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			 SQ_PSN_SEARCH_OPCODE_MASK));
		swq->psn_search->flags_next_psn = cpu_to_le32(
			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
	}
queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate SQ nq_work!");
			rc = -ENOMEM;
		}
	}
	return rc;
}
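
/* Ring the RQ doorbell: write the current RQ producer index, tagged with
 * the QP id and DBR_DBR_TYPE_RQ, to the QP's doorbell page so that HW
 * starts fetching the RQEs posted via bnxt_qplib_post_recv().
 */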
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_RQ);
	/* Flush the writes to HW Rx WQE before the ringing Rx DB */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}
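
/* Build one RQE from the ULP-supplied swqe (SG list plus wr_id bookkeeping)
 * at the current RQ producer slot.  The doorbell is not rung here; the
 * caller rings it via bnxt_qplib_post_recv_db().  If the QP is already in
 * the error state, only the wr_id is stored and a CQN worker is scheduled
 * so the WQE can be flushed through poll_cq.
 */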
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&rq->hwq.pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n",
			__func__);
		goto queue_err;
	}
	if (bnxt_qplib_queue_full(rq)) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Fill the SGEs from the ULP's sg_list */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires the wqe size to have room for at least one SGE even if
	 * none was supplied by the ULP
	 */
	if (!wqe->num_sge)
		rqe->wqe_size++;

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		rq->swq[sw_prod].wr_id = wqe->wr_id;
	}

	rq->hwq.prod++;
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&rq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate RQ nq_work!");
			rc = -ENOMEM;
		}
	}
done:
	return rc;
}

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	struct dbr_dbr db_msg = { 0 };

	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_CQ_ARMENA);
	/* Flush memory writes before enabling the CQ */
	wmb();
	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
}
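
/* Write the current CQ consumer index and the requested arm type (e.g.
 * DBR_DBR_TYPE_CQ or DBR_DBR_TYPE_CQ_ARMALL) to the CQ doorbell.  Callers
 * in this file hold the CQ hwq lock around this.
 */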
static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_cons;

	/* Ring DB */
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    arm_type);
	/* flush memory writes before arming the CQ */
	wmb();
	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}
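
/* Allocate the CQ's hardware queue, issue the CREATE_CQ firmware command
 * over the RCFW channel, then enable CQ arming so that completion
 * notifications can be requested on it.
 */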
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_cq req;
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc;

	cq->hwq.max_elements = cq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
				       cq->nmap, &cq->hwq.max_elements,
				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);

	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le32(
	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
						CMDQ_CREATE_CQ_LVL_SFT) |
	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));

	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);

	bnxt_qplib_arm_cq_enable(cq);
	return 0;

fail:
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
exit:
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
	return 0;
}
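
/* Complete every outstanding SQE between the SQ consumer and producer with
 * WORK_REQUEST_FLUSHED_ERR, skipping the internal fence WQEs.  Returns
 * -EAGAIN if the caller-provided CQE budget runs out before the SQ drains.
 */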
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	u32 sw_prod, sw_cons;
	struct bnxt_qplib_cqe *cqe;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		(*budget)--;
skip_compl:
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
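
/* Same as __flush_sq() but for the RQ: fabricate flush-error completions
 * for every posted RQE, using the receive CQE opcode that matches the QP
 * type (Raw/QP1, RC or UD).
 */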
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 sw_prod, sw_cons;
	int rc = 0;
	int opcode = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
		if (sw_cons == sw_prod)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[sw_cons].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);

	/* Add qp to flush list of the CQ */
	__bnxt_qplib_add_flush_qp(qp);
}

/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
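/* WA 9060: when a signaled SQE whose psn_search entry carries the phantom
 * marker (bit 31 of flags_next_psn) is about to be completed, stop normal
 * completion, arm the CQ for all entries and return -EAGAIN.  On later
 * passes (sq->condition set) peek ahead in the CQ for a valid REQ CQE whose
 * consumer index maps back to the fence WRID; once that phantom CQE shows
 * up, resume in single-completion mode.
 */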
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[sw_sq_cons];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);

		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
						     [CQE_IDX(peek_sw_cq_cons)];
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
						peek_req_hwcqe->sq_cons_idx) - 1
						, &sq->hwq);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP:Got Phantom CQE");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_sq_cons, cqe_sq_cons;
	struct bnxt_qplib_swq *swq;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: Process Req qp is NULL");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
	if (cqe_sq_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process req reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_sq_cons, sq->hwq.max_elements);
		return -EINVAL;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_sq_cons == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sw_sq_cons];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Processed Req ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
				sw_sq_cons, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_lock_buddy_cq(qp, cq);
			bnxt_qplib_mark_qp_error(qp);
			bnxt_qplib_unlock_buddy_cq(qp, cq);
		} else {
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				/* Before we complete, do WA 9060 */
				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		sq->hwq.cons++;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
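
/* Consume one RC receive CQE: translate it into a bnxt_qplib_cqe, look up
 * the ULP wr_id through the RQ software queue, and move the QP onto the
 * CQ flush list if the hardware reported an error status.
 */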
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	memcpy(cqe->smac, hwcqe->src_mac, 6);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
		      hwcqe->src_qp_high_srq_or_rq_wr_id) &
		       CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx %#x exceeded RQ max %#x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}
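
/* Returns true when no valid CQE is waiting at the current consumer index,
 * i.e. the CQ has been fully polled.
 */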
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	unsigned long flags;
	u32 sw_cons, raw_cons;
	bool rc = true;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: process_cq Raw/QP1 qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: ix 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
		/* Add qp to flush list of the CQ */
		bnxt_qplib_lock_buddy_cq(qp, cq);
		__bnxt_qplib_add_flush_qp(qp);
		bnxt_qplib_unlock_buddy_cq(qp, cq);
	}
done:
	return rc;
}

static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal qp is NULL");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;

	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_cons, sq->hwq.max_elements);
		goto do_rq;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[sw_cons].wr_id;
			cqe->type = sq->swq[sw_cons].type;
			cqe++;
			(*budget)--;
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && sw_cons != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Processed terminal ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
			cqe_cons, rq->hwq.max_elements);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless what the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_lock_buddy_cq(qp, cq);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_unlock_buddy_cq(qp, cq);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
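
/* Drain the flush lists of this CQ: generate flush completions for every
 * QP queued on the SQ and RQ flush lists, bounded by num_cqes.  Returns
 * the number of CQEs actually filled in.
 */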
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing SQ QP= %p",
			qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing RQ QP= %p",
			qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->hwq.lock, flags);

	return num_cqes - budget;
}
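
/* Main CQ polling loop: walk the hardware CQ from the current consumer,
 * dispatch each valid CQE by type (REQ, RES_RC, RES_UD, Raw/QP1, terminal,
 * cutoff) to its handler, then advance the consumer and ring the CQ
 * doorbell.  Returns the number of bnxt_qplib_cqe entries produced.
 */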
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	unsigned long flags;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* From the device's respective CQE format to qplib_wc */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cq unknown type 0x%lx",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cqe error rc = 0x%x", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
	}
exit:
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	if (arm_type)
		bnxt_qplib_arm_cq(cq, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}