- /*
- * Broadcom NetXtreme-E RoCE driver.
- *
- * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
- * Broadcom refers to Broadcom Limited and/or its subsidiaries.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Description: Fast Path Operators
- */
- #include <linux/interrupt.h>
- #include <linux/spinlock.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/pci.h>
- #include <linux/prefetch.h>
- #include "roce_hsi.h"
- #include "qplib_res.h"
- #include "qplib_rcfw.h"
- #include "qplib_sp.h"
- #include "qplib_fp.h"
- static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
- static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
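- /*
-  * Reset the phantom-WQE tracking state on the SQ. This is invoked before a
-  * QP is queued for flushing and again while flushing the SQ, so that any
-  * partially detected phantom completion state is discarded.
-  */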
- static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
- {
- qp->sq.condition = false;
- qp->sq.send_phantom = false;
- qp->sq.single = false;
- }
- /* Flush list */
- static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_cq *scq, *rcq;
- scq = qp->scq;
- rcq = qp->rcq;
- if (!qp->sq.flushed) {
- dev_dbg(&scq->hwq.pdev->dev,
- "QPLIB: FP: Adding to SQ Flush list = %p",
- qp);
- bnxt_qplib_cancel_phantom_processing(qp);
- list_add_tail(&qp->sq_flush, &scq->sqf_head);
- qp->sq.flushed = true;
- }
- if (!qp->srq) {
- if (!qp->rq.flushed) {
- dev_dbg(&rcq->hwq.pdev->dev,
- "QPLIB: FP: Adding to RQ Flush list = %p",
- qp);
- list_add_tail(&qp->rq_flush, &rcq->rqf_head);
- qp->rq.flushed = true;
- }
- }
- }
- void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
- {
- spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
- if (qp->scq == qp->rcq)
- __acquire(&qp->rcq->hwq.lock);
- else
- spin_lock(&qp->rcq->hwq.lock);
- }
- void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
- {
- if (qp->scq == qp->rcq)
- __release(&qp->rcq->hwq.lock);
- else
- spin_unlock(&qp->rcq->hwq.lock);
- spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
- }
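- /*
-  * Illustrative use of the CQ lock pair above (this mirrors
-  * bnxt_qplib_add_flush_qp() below): the send CQ lock is always taken first,
-  * and the recv CQ lock is only taken when it is a distinct CQ.
-  *
-  *     unsigned long flags;
-  *
-  *     bnxt_qplib_acquire_cq_locks(qp, &flags);
-  *     __bnxt_qplib_add_flush_qp(qp);
-  *     bnxt_qplib_release_cq_locks(qp, &flags);
-  */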
- static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- {
- struct bnxt_qplib_cq *buddy_cq = NULL;
- if (qp->scq == qp->rcq)
- buddy_cq = NULL;
- else if (qp->scq == cq)
- buddy_cq = qp->rcq;
- else
- buddy_cq = qp->scq;
- return buddy_cq;
- }
- static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __acquires(&buddy_cq->hwq.lock)
- {
- struct bnxt_qplib_cq *buddy_cq = NULL;
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __acquire(&cq->hwq.lock);
- else
- spin_lock(&buddy_cq->hwq.lock);
- }
- static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __releases(&buddy_cq->hwq.lock)
- {
- struct bnxt_qplib_cq *buddy_cq = NULL;
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __release(&cq->hwq.lock);
- else
- spin_unlock(&buddy_cq->hwq.lock);
- }
- void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
- {
- unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
- }
- static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
- {
- if (qp->sq.flushed) {
- qp->sq.flushed = false;
- list_del(&qp->sq_flush);
- }
- if (!qp->srq) {
- if (qp->rq.flushed) {
- qp->rq.flushed = false;
- list_del(&qp->rq_flush);
- }
- }
- }
- void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
- {
- unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
- __clean_cq(qp->scq, (u64)(unsigned long)qp);
- qp->sq.hwq.prod = 0;
- qp->sq.hwq.cons = 0;
- __clean_cq(qp->rcq, (u64)(unsigned long)qp);
- qp->rq.hwq.prod = 0;
- qp->rq.hwq.cons = 0;
- __bnxt_qplib_del_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
- }
- static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
- {
- struct bnxt_qplib_nq_work *nq_work =
- container_of(work, struct bnxt_qplib_nq_work, work);
- struct bnxt_qplib_cq *cq = nq_work->cq;
- struct bnxt_qplib_nq *nq = nq_work->nq;
- if (cq && nq) {
- spin_lock_bh(&cq->compl_lock);
- if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
- dev_dbg(&nq->pdev->dev,
- "%s:Trigger cq = %p event nq = %p\n",
- __func__, cq, nq);
- nq->cqn_handler(nq, cq);
- }
- spin_unlock_bh(&cq->compl_lock);
- }
- kfree(nq_work);
- }
- static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
- struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- struct bnxt_qplib_q *sq = &qp->sq;
- if (qp->rq_hdr_buf)
- dma_free_coherent(&res->pdev->dev,
- rq->hwq.max_elements * qp->rq_hdr_buf_size,
- qp->rq_hdr_buf, qp->rq_hdr_buf_map);
- if (qp->sq_hdr_buf)
- dma_free_coherent(&res->pdev->dev,
- sq->hwq.max_elements * qp->sq_hdr_buf_size,
- qp->sq_hdr_buf, qp->sq_hdr_buf_map);
- qp->rq_hdr_buf = NULL;
- qp->sq_hdr_buf = NULL;
- qp->rq_hdr_buf_map = 0;
- qp->sq_hdr_buf_map = 0;
- qp->sq_hdr_buf_size = 0;
- qp->rq_hdr_buf_size = 0;
- }
- static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
- struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- struct bnxt_qplib_q *sq = &qp->sq;
- int rc = 0;
- if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
- qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
- sq->hwq.max_elements *
- qp->sq_hdr_buf_size,
- &qp->sq_hdr_buf_map, GFP_KERNEL);
- if (!qp->sq_hdr_buf) {
- rc = -ENOMEM;
- dev_err(&res->pdev->dev,
- "QPLIB: Failed to create sq_hdr_buf");
- goto fail;
- }
- }
- if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
- qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
- rq->hwq.max_elements *
- qp->rq_hdr_buf_size,
- &qp->rq_hdr_buf_map,
- GFP_KERNEL);
- if (!qp->rq_hdr_buf) {
- rc = -ENOMEM;
- dev_err(&res->pdev->dev,
- "QPLIB: Failed to create rq_hdr_buf");
- goto fail;
- }
- }
- return 0;
- fail:
- bnxt_qplib_free_qp_hdr_buf(res, qp);
- return rc;
- }
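- /*
-  * The header buffers above are only allocated when the ULP sets a non-zero
-  * sq_hdr_buf_size/rq_hdr_buf_size (the QP1/GSI path, consumed through
-  * bnxt_qplib_get_qp1_sq_buf()/bnxt_qplib_get_qp1_rq_buf() below); on any
-  * failure the partial allocations are released via
-  * bnxt_qplib_free_qp_hdr_buf().
-  */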
- static void bnxt_qplib_service_nq(unsigned long data)
- {
- struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
- struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base *nqe, **nq_ptr;
- struct bnxt_qplib_cq *cq;
- int num_cqne_processed = 0;
- u32 sw_cons, raw_cons;
- u16 type;
- int budget = nq->budget;
- u64 q_handle;
- /* Service the NQ until empty */
- raw_cons = hwq->cons;
- while (budget--) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
- nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
- break;
- /*
- * The valid test of the entry must be done first before
- * reading any further.
- */
- dma_rmb();
- type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
- switch (type) {
- case NQ_BASE_TYPE_CQ_NOTIFICATION:
- {
- struct nq_cn *nqcne = (struct nq_cn *)nqe;
- q_handle = le32_to_cpu(nqcne->cq_handle_low);
- q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
- << 32;
- cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
- bnxt_qplib_arm_cq_enable(cq);
- spin_lock_bh(&cq->compl_lock);
- atomic_set(&cq->arm_state, 0);
- if (!nq->cqn_handler(nq, (cq)))
- num_cqne_processed++;
- else
- dev_warn(&nq->pdev->dev,
- "QPLIB: cqn - type 0x%x not handled",
- type);
- spin_unlock_bh(&cq->compl_lock);
- break;
- }
- case NQ_BASE_TYPE_DBQ_EVENT:
- break;
- default:
- dev_warn(&nq->pdev->dev,
- "QPLIB: nqe with type = 0x%x not handled",
- type);
- break;
- }
- raw_cons++;
- }
- if (hwq->cons != raw_cons) {
- hwq->cons = raw_cons;
- NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
- }
- }
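- /*
-  * The NQ tasklet above consumes up to nq->budget entries per run: each
-  * valid NQE is read only after dma_rmb(), CQ notifications arm-enable the
-  * CQ and invoke nq->cqn_handler(), and the consumer index is written back
-  * through NQ_DB_REARM only if anything was actually consumed.
-  */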
- static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
- {
- struct bnxt_qplib_nq *nq = dev_instance;
- struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base **nq_ptr;
- u32 sw_cons;
- /* Prefetch the NQ element */
- sw_cons = HWQ_CMP(hwq->cons, hwq);
- nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
- prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
- /* Fan out to CPU affinitized kthreads? */
- tasklet_schedule(&nq->worker);
- return IRQ_HANDLED;
- }
- void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
- {
- if (nq->cqn_wq) {
- destroy_workqueue(nq->cqn_wq);
- nq->cqn_wq = NULL;
- }
- /* Make sure the HW is stopped! */
- synchronize_irq(nq->vector);
- tasklet_disable(&nq->worker);
- tasklet_kill(&nq->worker);
- if (nq->requested) {
- irq_set_affinity_hint(nq->vector, NULL);
- free_irq(nq->vector, nq);
- nq->requested = false;
- }
- if (nq->bar_reg_iomem)
- iounmap(nq->bar_reg_iomem);
- nq->bar_reg_iomem = NULL;
- nq->cqn_handler = NULL;
- nq->srqn_handler = NULL;
- nq->vector = 0;
- }
- int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
- int nq_idx, int msix_vector, int bar_reg_offset,
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *),
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *, u8 event))
- {
- resource_size_t nq_base;
- int rc = -1;
- nq->pdev = pdev;
- nq->vector = msix_vector;
- nq->cqn_handler = cqn_handler;
- nq->srqn_handler = srqn_handler;
- tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
- /* Have a task to schedule CQ notifiers in the post-send case */
- nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
- if (!nq->cqn_wq)
- goto fail;
- nq->requested = false;
- memset(nq->name, 0, 32);
- sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
- if (rc) {
- dev_err(&nq->pdev->dev,
- "Failed to request IRQ for NQ: %#x", rc);
- bnxt_qplib_disable_nq(nq);
- goto fail;
- }
- cpumask_clear(&nq->mask);
- cpumask_set_cpu(nq_idx, &nq->mask);
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
- if (rc) {
- dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_idx);
- }
- nq->requested = true;
- nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
- nq->bar_reg_off = bar_reg_offset;
- nq_base = pci_resource_start(pdev, nq->bar_reg);
- if (!nq_base) {
- rc = -ENOMEM;
- goto fail;
- }
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
- if (!nq->bar_reg_iomem) {
- rc = -ENOMEM;
- goto fail;
- }
- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
- return 0;
- fail:
- bnxt_qplib_disable_nq(nq);
- return rc;
- }
- void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
- {
- if (nq->hwq.max_elements) {
- bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
- nq->hwq.max_elements = 0;
- }
- }
- int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
- {
- nq->pdev = pdev;
- if (!nq->hwq.max_elements ||
- nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
- nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
- if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
- &nq->hwq.max_elements,
- BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_L2_CMPL))
- return -ENOMEM;
- nq->budget = 8;
- return 0;
- }
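- /*
-  * NQ lifecycle sketch (illustrative ordering; the actual caller lives
-  * outside this file):
-  *
-  *     bnxt_qplib_alloc_nq(pdev, nq);      // allocate the NQ ring
-  *     bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vector, bar_reg_offset,
-  *                          cqn_handler, srqn_handler);
-  *     ...                                 // NQEs serviced from the tasklet
-  *     bnxt_qplib_disable_nq(nq);          // quiesce IRQ, tasklet, workqueue
-  *     bnxt_qplib_free_nq(nq);             // release the NQ ring
-  */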
- /* QP */
- int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_qp1 req;
- struct creq_create_qp1_resp resp;
- struct bnxt_qplib_pbl *pbl;
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_q *rq = &qp->rq;
- int rc;
- u16 cmd_flags = 0;
- u32 qp_flags = 0;
- RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
- /* General */
- req.type = qp->type;
- req.dpi = cpu_to_le32(qp->dpi->dpi);
- req.qp_handle = cpu_to_le64(qp->qp_handle);
- /* SQ */
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
- &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
- if (rc)
- goto exit;
- sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
- if (!sq->swq) {
- rc = -ENOMEM;
- goto fail_sq;
- }
- pbl = &sq->hwq.pbl[PBL_LVL_0];
- req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
- << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
- if (qp->scq)
- req.scq_cid = cpu_to_le32(qp->scq->id);
- qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
- /* RQ */
- if (rq->max_wqe) {
- rq->hwq.max_elements = qp->rq.max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
- &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
- if (rc)
- goto fail_sq;
- rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
- GFP_KERNEL);
- if (!rq->swq) {
- rc = -ENOMEM;
- goto fail_rq;
- }
- pbl = &rq->hwq.pbl[PBL_LVL_0];
- req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP1_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
- if (qp->rcq)
- req.rcq_cid = cpu_to_le32(qp->rcq->id);
- }
- /* Header buffer - allow hdr_buf to be passed in */
- rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
- if (rc) {
- rc = -ENOMEM;
- goto fail;
- }
- req.qp_flags = cpu_to_le32(qp_flags);
- req.sq_size = cpu_to_le32(sq->hwq.max_elements);
- req.rq_size = cpu_to_le32(rq->hwq.max_elements);
- req.sq_fwo_sq_sge =
- cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
- CMDQ_CREATE_QP1_SQ_SGE_SFT);
- req.rq_fwo_rq_sge =
- cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
- CMDQ_CREATE_QP1_RQ_SGE_SFT);
- req.pd_id = cpu_to_le32(qp->pd->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc)
- goto fail;
- qp->id = le32_to_cpu(resp.xid);
- qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
- return 0;
- fail:
- bnxt_qplib_free_qp_hdr_buf(res, qp);
- fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
- kfree(rq->swq);
- fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
- kfree(sq->swq);
- exit:
- return rc;
- }
- int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct cmdq_create_qp req;
- struct creq_create_qp_resp resp;
- struct bnxt_qplib_pbl *pbl;
- struct sq_psn_search **psn_search_ptr;
- unsigned long int psn_search, poff = 0;
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_q *rq = &qp->rq;
- struct bnxt_qplib_hwq *xrrq;
- int i, rc, req_size, psn_sz;
- u16 cmd_flags = 0, max_ssge;
- u32 sw_prod, qp_flags = 0;
- RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
- /* General */
- req.type = qp->type;
- req.dpi = cpu_to_le32(qp->dpi->dpi);
- req.qp_handle = cpu_to_le64(qp->qp_handle);
- /* SQ */
- psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
- sizeof(struct sq_psn_search) : 0;
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
- sq->nmap, &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
- psn_sz,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
- if (rc)
- goto exit;
- sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
- if (!sq->swq) {
- rc = -ENOMEM;
- goto fail_sq;
- }
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- if (psn_sz) {
- psn_search_ptr = (struct sq_psn_search **)
- &hw_sq_send_ptr[get_sqe_pg
- (sq->hwq.max_elements)];
- psn_search = (unsigned long int)
- &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
- [get_sqe_idx(sq->hwq.max_elements)];
- if (psn_search & ~PAGE_MASK) {
- /* If the psn_search does not start on a page boundary,
- * then calculate the offset
- */
- poff = (psn_search & ~PAGE_MASK) /
- BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
- }
- for (i = 0; i < sq->hwq.max_elements; i++)
- sq->swq[i].psn_search =
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- }
- pbl = &sq->hwq.pbl[PBL_LVL_0];
- req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
- << CMDQ_CREATE_QP_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
- /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
- hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
- }
- if (qp->scq)
- req.scq_cid = cpu_to_le32(qp->scq->id);
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
- if (qp->sig_type)
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
- /* RQ */
- if (rq->max_wqe) {
- rq->hwq.max_elements = rq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
- rq->nmap, &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
- if (rc)
- goto fail_sq;
- rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
- GFP_KERNEL);
- if (!rq->swq) {
- rc = -ENOMEM;
- goto fail_rq;
- }
- pbl = &rq->hwq.pbl[PBL_LVL_0];
- req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
- }
- if (qp->rcq)
- req.rcq_cid = cpu_to_le32(qp->rcq->id);
- req.qp_flags = cpu_to_le32(qp_flags);
- req.sq_size = cpu_to_le32(sq->hwq.max_elements);
- req.rq_size = cpu_to_le32(rq->hwq.max_elements);
- qp->sq_hdr_buf = NULL;
- qp->rq_hdr_buf = NULL;
- rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
- if (rc)
- goto fail_rq;
- /* CTRL-22434: Irrespective of the requested SGE count on the SQ,
- * always create the QP with the maximum possible send SGEs if the
- * requested inline size is greater than 0.
- */
- max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
- req.sq_fwo_sq_sge = cpu_to_le16(
- ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
- << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
- req.rq_fwo_rq_sge = cpu_to_le16(
- ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
- << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
- /* ORRQ and IRRQ */
- if (psn_sz) {
- xrrq = &qp->orrq;
- xrrq->max_elements =
- ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
- req_size = xrrq->max_elements *
- BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
- req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
- if (rc)
- goto fail_buf_free;
- pbl = &xrrq->pbl[PBL_LVL_0];
- req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
- xrrq = &qp->irrq;
- xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
- qp->max_dest_rd_atomic);
- req_size = xrrq->max_elements *
- BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
- req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
- if (rc)
- goto fail_orrq;
- pbl = &xrrq->pbl[PBL_LVL_0];
- req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
- }
- req.pd_id = cpu_to_le32(qp->pd->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc)
- goto fail;
- qp->id = le32_to_cpu(resp.xid);
- qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- INIT_LIST_HEAD(&qp->sq_flush);
- INIT_LIST_HEAD(&qp->rq_flush);
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
- return 0;
- fail:
- if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
- fail_orrq:
- if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
- fail_buf_free:
- bnxt_qplib_free_qp_hdr_buf(res, qp);
- fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
- kfree(rq->swq);
- fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
- kfree(sq->swq);
- exit:
- return rc;
- }
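- /*
-  * Error unwind for bnxt_qplib_create_qp() above runs
-  * fail -> fail_orrq -> fail_buf_free -> fail_rq -> fail_sq -> exit, so each
-  * label tears down only the resources that were set up before the failure.
-  */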
- static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
- {
- switch (qp->state) {
- case CMDQ_MODIFY_QP_NEW_STATE_RTR:
- /* INIT->RTR, configure the path_mtu to the default
- * 2048 if not being requested
- */
- if (!(qp->modify_flags &
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
- qp->modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->path_mtu =
- CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
- }
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
- /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
- if (qp->max_dest_rd_atomic < 1)
- qp->max_dest_rd_atomic = 1;
- qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
- /* Bono FW 20.6.5 requires SGID_INDEX configuration */
- if (!(qp->modify_flags &
- CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
- qp->modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
- qp->ah.sgid_index = 0;
- }
- break;
- default:
- break;
- }
- }
- static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
- {
- switch (qp->state) {
- case CMDQ_MODIFY_QP_NEW_STATE_RTS:
- /* Bono FW requires the max_rd_atomic to be >= 1 */
- if (qp->max_rd_atomic < 1)
- qp->max_rd_atomic = 1;
- /* Bono FW does not allow PKEY_INDEX,
- * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
- * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
- * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
- * modification
- */
- qp->modify_flags &=
- ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
- CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
- CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
- CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
- CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
- CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
- CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
- CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
- CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
- CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
- CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
- break;
- default:
- break;
- }
- }
- static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
- {
- switch (qp->cur_qp_state) {
- case CMDQ_MODIFY_QP_NEW_STATE_RESET:
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_INIT:
- __modify_flags_from_init_state(qp);
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_RTR:
- __modify_flags_from_rtr_state(qp);
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_RTS:
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_SQD:
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_SQE:
- break;
- case CMDQ_MODIFY_QP_NEW_STATE_ERR:
- break;
- default:
- break;
- }
- }
- int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_modify_qp req;
- struct creq_modify_qp_resp resp;
- u16 cmd_flags = 0, pkey;
- u32 temp32[4];
- u32 bmask;
- int rc;
- RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
- /* Filter out the qp_attr_mask based on the state->new transition */
- __filter_modify_flags(qp);
- bmask = qp->modify_flags;
- req.modify_mask = cpu_to_le32(qp->modify_flags);
- req.qp_cid = cpu_to_le32(qp->id);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
- req.network_type_en_sqd_async_notify_new_state =
- (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
- (qp->en_sqd_async_notify ?
- CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
- }
- req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
- req.access = qp->access;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
- if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
- qp->pkey_index, &pkey))
- req.pkey = cpu_to_le16(pkey);
- }
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
- req.qkey = cpu_to_le32(qp->qkey);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
- memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
- req.dgid[0] = cpu_to_le32(temp32[0]);
- req.dgid[1] = cpu_to_le32(temp32[1]);
- req.dgid[2] = cpu_to_le32(temp32[2]);
- req.dgid[3] = cpu_to_le32(temp32[3]);
- }
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
- req.flow_label = cpu_to_le32(qp->ah.flow_label);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
- req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
- [qp->ah.sgid_index]);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
- req.hop_limit = qp->ah.hop_limit;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
- req.traffic_class = qp->ah.traffic_class;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
- memcpy(req.dest_mac, qp->ah.dmac, 6);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
- req.path_mtu = qp->path_mtu;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
- req.timeout = qp->timeout;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
- req.retry_cnt = qp->retry_cnt;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
- req.rnr_retry = qp->rnr_retry;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
- req.min_rnr_timer = qp->min_rnr_timer;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
- req.rq_psn = cpu_to_le32(qp->rq.psn);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
- req.sq_psn = cpu_to_le32(qp->sq.psn);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
- req.max_rd_atomic =
- ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
- req.max_dest_rd_atomic =
- IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
- req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
- req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
- req.sq_sge = cpu_to_le16(qp->sq.max_sge);
- req.rq_sge = cpu_to_le16(qp->rq.max_sge);
- req.max_inline_data = cpu_to_le32(qp->max_inline_data);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
- req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
- req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc)
- return rc;
- qp->cur_qp_state = qp->state;
- return 0;
- }
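- /*
-  * Control-path sketch for a QP (illustrative only; the actual callers live
-  * outside this file):
-  *
-  *     bnxt_qplib_create_qp(res, qp);             // starts in RESET
-  *     qp->state = CMDQ_MODIFY_QP_NEW_STATE_INIT;
-  *     bnxt_qplib_modify_qp(res, qp);
-  *     ...                                        // repeat for RTR and RTS
-  *     bnxt_qplib_query_qp(res, qp);              // read context back if needed
-  *     bnxt_qplib_destroy_qp(res, qp);
-  */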
- int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_query_qp req;
- struct creq_query_qp_resp resp;
- struct bnxt_qplib_rcfw_sbuf *sbuf;
- struct creq_query_qp_resp_sb *sb;
- u16 cmd_flags = 0;
- u32 temp32[4];
- int i, rc = 0;
- RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
- return -ENOMEM;
- sb = sbuf->sb;
- req.qp_cid = cpu_to_le32(qp->id);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
- (void *)sbuf, 0);
- if (rc)
- goto bail;
- /* Extract the context from the side buffer */
- qp->state = sb->en_sqd_async_notify_state &
- CREQ_QUERY_QP_RESP_SB_STATE_MASK;
- qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
- CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
- true : false;
- qp->access = sb->access;
- qp->pkey_index = le16_to_cpu(sb->pkey);
- qp->qkey = le32_to_cpu(sb->qkey);
- temp32[0] = le32_to_cpu(sb->dgid[0]);
- temp32[1] = le32_to_cpu(sb->dgid[1]);
- temp32[2] = le32_to_cpu(sb->dgid[2]);
- temp32[3] = le32_to_cpu(sb->dgid[3]);
- memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
- qp->ah.flow_label = le32_to_cpu(sb->flow_label);
- qp->ah.sgid_index = 0;
- for (i = 0; i < res->sgid_tbl.max; i++) {
- if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
- qp->ah.sgid_index = i;
- break;
- }
- }
- if (i == res->sgid_tbl.max)
- dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
- qp->ah.hop_limit = sb->hop_limit;
- qp->ah.traffic_class = sb->traffic_class;
- memcpy(qp->ah.dmac, sb->dest_mac, 6);
- qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
- CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
- CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
- qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
- CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
- CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
- qp->timeout = sb->timeout;
- qp->retry_cnt = sb->retry_cnt;
- qp->rnr_retry = sb->rnr_retry;
- qp->min_rnr_timer = sb->min_rnr_timer;
- qp->rq.psn = le32_to_cpu(sb->rq_psn);
- qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
- qp->sq.psn = le32_to_cpu(sb->sq_psn);
- qp->max_dest_rd_atomic =
- IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
- qp->sq.max_wqe = qp->sq.hwq.max_elements;
- qp->rq.max_wqe = qp->rq.hwq.max_elements;
- qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
- qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
- qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
- qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
- memcpy(qp->smac, sb->src_mac, 6);
- qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
- bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
- return rc;
- }
- static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
- {
- struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct cq_base *hw_cqe, **hw_cqe_ptr;
- int i;
- for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
- if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
- continue;
- /*
- * The valid test of the entry must be done first before
- * reading any further.
- */
- dma_rmb();
- switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
- case CQ_BASE_CQE_TYPE_REQ:
- case CQ_BASE_CQE_TYPE_TERMINAL:
- {
- struct cq_req *cqe = (struct cq_req *)hw_cqe;
- if (qp == le64_to_cpu(cqe->qp_handle))
- cqe->qp_handle = 0;
- break;
- }
- case CQ_BASE_CQE_TYPE_RES_RC:
- case CQ_BASE_CQE_TYPE_RES_UD:
- case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
- {
- struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
- if (qp == le64_to_cpu(cqe->qp_handle))
- cqe->qp_handle = 0;
- break;
- }
- default:
- break;
- }
- }
- }
- int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
- struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_destroy_qp req;
- struct creq_destroy_qp_resp resp;
- unsigned long flags;
- u16 cmd_flags = 0;
- int rc;
- rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
- rcfw->qp_tbl[qp->id].qp_handle = NULL;
- RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
- req.qp_cid = cpu_to_le32(qp->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc) {
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = qp;
- return rc;
- }
- /* Must walk the associated CQs to nullify the QP ptr */
- spin_lock_irqsave(&qp->scq->hwq.lock, flags);
- __clean_cq(qp->scq, (u64)(unsigned long)qp);
- if (qp->rcq && qp->rcq != qp->scq) {
- spin_lock(&qp->rcq->hwq.lock);
- __clean_cq(qp->rcq, (u64)(unsigned long)qp);
- spin_unlock(&qp->rcq->hwq.lock);
- }
- spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
- bnxt_qplib_free_qp_hdr_buf(res, qp);
- bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
- kfree(qp->sq.swq);
- bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
- kfree(qp->rq.swq);
- if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
- if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
- return 0;
- }
- void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_sge *sge)
- {
- struct bnxt_qplib_q *sq = &qp->sq;
- u32 sw_prod;
- memset(sge, 0, sizeof(*sge));
- if (qp->sq_hdr_buf) {
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
- sw_prod * qp->sq_hdr_buf_size);
- sge->lkey = 0xFFFFFFFF;
- sge->size = qp->sq_hdr_buf_size;
- return qp->sq_hdr_buf + sw_prod * sge->size;
- }
- return NULL;
- }
- u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- return HWQ_CMP(rq->hwq.prod, &rq->hwq);
- }
- dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
- {
- return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
- }
- void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_sge *sge)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- u32 sw_prod;
- memset(sge, 0, sizeof(*sge));
- if (qp->rq_hdr_buf) {
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
- sw_prod * qp->rq_hdr_buf_size);
- sge->lkey = 0xFFFFFFFF;
- sge->size = qp->rq_hdr_buf_size;
- return qp->rq_hdr_buf + sw_prod * sge->size;
- }
- return NULL;
- }
- void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_q *sq = &qp->sq;
- struct dbr_dbr db_msg = { 0 };
- u32 sw_prod;
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_SQ);
- /* Flush all the WQE writes to HW */
- wmb();
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
- }
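- /*
-  * The SQ doorbell written above packs the current software producer index
-  * into DBR_DBR_INDEX and the QP id plus DBR_DBR_TYPE_SQ into type_xid; the
-  * wmb() guarantees the WQE contents are visible to the HW before the ring.
-  */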
- int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_swqe *wqe)
- {
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_swq *swq;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct sq_sge *hw_sge;
- struct bnxt_qplib_nq_work *nq_work = NULL;
- bool sch_handler = false;
- u32 sw_prod;
- u8 wqe_size16;
- int i, rc = 0, data_len = 0, pkt_num = 0;
- __le32 temp32;
- if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
- if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- sch_handler = true;
- dev_dbg(&sq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
- goto queue_err;
- }
- }
- if (bnxt_qplib_queue_full(sq)) {
- dev_err(&sq->hwq.pdev->dev,
- "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
- sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
- sq->q_full_delta);
- rc = -ENOMEM;
- goto done;
- }
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- swq = &sq->swq[sw_prod];
- swq->wr_id = wqe->wr_id;
- swq->type = wqe->type;
- swq->flags = wqe->flags;
- if (qp->sig_type)
- swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
- swq->start_psn = sq->psn & BTH_PSN_MASK;
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
- memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
- if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
- /* Copy the inline data */
- if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
- dev_warn(&sq->hwq.pdev->dev,
- "QPLIB: Inline data length > 96 detected");
- data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
- } else {
- data_len = wqe->inline_len;
- }
- memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
- wqe_size16 = (data_len + 15) >> 4;
- } else {
- for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
- i < wqe->num_sge; i++, hw_sge++) {
- hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
- hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
- hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
- data_len += wqe->sg_list[i].size;
- }
- /* Each SGE entry = 1 WQE size16 */
- wqe_size16 = wqe->num_sge;
- /* HW requires that the wqe size has room for at least one SGE
- * even if none was supplied by the ULP
- */
- if (!wqe->num_sge)
- wqe_size16++;
- }
- /* Specifics */
- switch (wqe->type) {
- case BNXT_QPLIB_SWQE_TYPE_SEND:
- if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
- /* Assemble info for Raw Ethertype QPs */
- struct sq_send_raweth_qp1 *sqe =
- (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->wqe_size = wqe_size16 +
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
- sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
- sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
- sqe->length = cpu_to_le32(data_len);
- sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
- SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
- SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
- break;
- }
- /* fall thru */
- case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
- case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
- {
- struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->wqe_size = wqe_size16 +
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
- sqe->inv_key_or_imm_data = cpu_to_le32(
- wqe->send.inv_key);
- if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
- sqe->q_key = cpu_to_le32(wqe->send.q_key);
- sqe->dst_qp = cpu_to_le32(
- wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
- sqe->length = cpu_to_le32(data_len);
- sqe->avid = cpu_to_le32(wqe->send.avid &
- SQ_SEND_AVID_MASK);
- sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
- } else {
- sqe->length = cpu_to_le32(data_len);
- sqe->dst_qp = 0;
- sqe->avid = 0;
- if (qp->mtu)
- pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
- if (!pkt_num)
- pkt_num = 1;
- sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
- }
- break;
- }
- case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
- case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
- case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
- {
- struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->wqe_size = wqe_size16 +
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
- sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
- sqe->length = cpu_to_le32((u32)data_len);
- sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
- sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
- if (qp->mtu)
- pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
- if (!pkt_num)
- pkt_num = 1;
- sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
- break;
- }
- case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
- case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
- {
- struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
- sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
- sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
- sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
- if (qp->mtu)
- pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
- if (!pkt_num)
- pkt_num = 1;
- sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
- break;
- }
- case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
- {
- struct sq_localinvalidate *sqe =
- (struct sq_localinvalidate *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
- break;
- }
- case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
- {
- struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->access_cntl = wqe->frmr.access_cntl |
- SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
- sqe->zero_based_page_size_log =
- (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
- SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
- (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
- sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
- temp32 = cpu_to_le32(wqe->frmr.length);
- memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
- sqe->numlevels_pbl_page_size_log =
- ((wqe->frmr.pbl_pg_sz_log <<
- SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
- SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
- ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
- SQ_FR_PMR_NUMLEVELS_MASK);
- for (i = 0; i < wqe->frmr.page_list_len; i++)
- wqe->frmr.pbl_ptr[i] = cpu_to_le64(
- wqe->frmr.page_list[i] |
- PTU_PTE_VALID);
- sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
- sqe->va = cpu_to_le64(wqe->frmr.va);
- break;
- }
- case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
- {
- struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
- sqe->wqe_type = wqe->type;
- sqe->flags = wqe->flags;
- sqe->access_cntl = wqe->bind.access_cntl;
- sqe->mw_type_zero_based = wqe->bind.mw_type |
- (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
- sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
- sqe->l_key = cpu_to_le32(wqe->bind.r_key);
- sqe->va = cpu_to_le64(wqe->bind.va);
- temp32 = cpu_to_le32(wqe->bind.length);
- memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
- break;
- }
- default:
- /* Bad wqe, return error */
- rc = -EINVAL;
- goto done;
- }
- swq->next_psn = sq->psn & BTH_PSN_MASK;
- if (swq->psn_search) {
- swq->psn_search->opcode_start_psn = cpu_to_le32(
- ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
- SQ_PSN_SEARCH_START_PSN_MASK) |
- ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
- SQ_PSN_SEARCH_OPCODE_MASK));
- swq->psn_search->flags_next_psn = cpu_to_le32(
- ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
- SQ_PSN_SEARCH_NEXT_PSN_MASK));
- }
- queue_err:
- if (sch_handler) {
- /* Store the ULP info in the software structures */
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- swq = &sq->swq[sw_prod];
- swq->wr_id = wqe->wr_id;
- swq->type = wqe->type;
- swq->flags = wqe->flags;
- if (qp->sig_type)
- swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
- swq->start_psn = sq->psn & BTH_PSN_MASK;
- }
- sq->hwq.prod++;
- qp->wqe_cnt++;
- done:
- if (sch_handler) {
- nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
- if (nq_work) {
- nq_work->cq = qp->scq;
- nq_work->nq = qp->scq->nq;
- INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
- queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
- } else {
- dev_err(&sq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate SQ nq_work!");
- rc = -ENOMEM;
- }
- }
- return rc;
- }
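- /*
-  * Typical posting sequence (illustrative): one or more WQEs are queued with
-  * bnxt_qplib_post_send() and the SQ doorbell is rung once afterwards:
-  *
-  *     rc = bnxt_qplib_post_send(qp, wqe);
-  *     if (!rc)
-  *             bnxt_qplib_post_send_db(qp);
-  */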
- void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- struct dbr_dbr db_msg = { 0 };
- u32 sw_prod;
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_RQ);
- /* Flush the writes to the HW Rx WQE before ringing the Rx DB */
- wmb();
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
- }
- int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_swqe *wqe)
- {
- struct bnxt_qplib_q *rq = &qp->rq;
- struct rq_wqe *rqe, **rqe_ptr;
- struct sq_sge *hw_sge;
- struct bnxt_qplib_nq_work *nq_work = NULL;
- bool sch_handler = false;
- u32 sw_prod;
- int i, rc = 0;
- if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- sch_handler = true;
- dev_dbg(&rq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
- goto queue_err;
- }
- if (bnxt_qplib_queue_full(rq)) {
- dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
- rc = -EINVAL;
- goto done;
- }
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- rq->swq[sw_prod].wr_id = wqe->wr_id;
- rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
- rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
- memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
- /* Fill the SGE list from the ULP-supplied sg_list */
- for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
- i < wqe->num_sge; i++, hw_sge++) {
- hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
- hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
- hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
- }
- rqe->wqe_type = wqe->type;
- rqe->flags = wqe->flags;
- rqe->wqe_size = wqe->num_sge +
- ((offsetof(typeof(*rqe), data) + 15) >> 4);
- /* HW requires that the wqe size has room for at least one SGE even if
- * none was supplied by the ULP
- */
- if (!wqe->num_sge)
- rqe->wqe_size++;
- /* Supply the rqe->wr_id index to the wr_id_tbl for now */
- rqe->wr_id[0] = cpu_to_le32(sw_prod);
- queue_err:
- if (sch_handler) {
- /* Store the ULP info in the software structures */
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- rq->swq[sw_prod].wr_id = wqe->wr_id;
- }
- rq->hwq.prod++;
- if (sch_handler) {
- nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
- if (nq_work) {
- nq_work->cq = qp->rcq;
- nq_work->nq = qp->rcq->nq;
- INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
- queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
- } else {
- dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate RQ nq_work!");
- rc = -ENOMEM;
- }
- }
- done:
- return rc;
- }
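- /*
-  * Receive posting follows the same pattern (illustrative):
-  *
-  *     rc = bnxt_qplib_post_recv(qp, wqe);
-  *     if (!rc)
-  *             bnxt_qplib_post_recv_db(qp);
-  */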
- /* CQ */
- /* Spinlock must be held */
- static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
- {
- struct dbr_dbr db_msg = { 0 };
- db_msg.type_xid =
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_CQ_ARMENA);
- /* Flush memory writes before enabling the CQ */
- wmb();
- __iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
- }
- static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
- {
- struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct dbr_dbr db_msg = { 0 };
- u32 sw_cons;
- /* Ring DB */
- sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
- db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- arm_type);
- /* flush memory writes before arming the CQ */
- wmb();
- __iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
- }
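- /*
-  * Of the two arming helpers above, bnxt_qplib_arm_cq_enable() writes the
-  * ARMENA doorbell through cq->dbr_base (the BAR mapping saved at create
-  * time), while bnxt_qplib_arm_cq() rings the requested arm_type through the
-  * CQ's own DPI doorbell page along with the current consumer index.
-  */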
- int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_cq req;
- struct creq_create_cq_resp resp;
- struct bnxt_qplib_pbl *pbl;
- u16 cmd_flags = 0;
- int rc;
- cq->hwq.max_elements = cq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
- cq->nmap, &cq->hwq.max_elements,
- BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
- if (rc)
- goto exit;
- RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
- if (!cq->dpi) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
- rc = -EINVAL;
- goto fail;
- }
- req.dpi = cpu_to_le32(cq->dpi->dpi);
- req.cq_handle = cpu_to_le64(cq->cq_handle);
- req.cq_size = cpu_to_le32(cq->hwq.max_elements);
- pbl = &cq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le32(
- ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
- CMDQ_CREATE_CQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
- req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.cq_fco_cnq_id = cpu_to_le32(
- (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
- CMDQ_CREATE_CQ_CNQ_ID_SFT);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc)
- goto fail;
- cq->id = le32_to_cpu(resp.xid);
- cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
- cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
- init_waitqueue_head(&cq->waitq);
- INIT_LIST_HEAD(&cq->sqf_head);
- INIT_LIST_HEAD(&cq->rqf_head);
- spin_lock_init(&cq->compl_lock);
- bnxt_qplib_arm_cq_enable(cq);
- return 0;
- fail:
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
- exit:
- return rc;
- }
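- /* A minimal usage sketch (my_dpi and my_nq_ring_id are hypothetical
- * placeholders; only the cq fields consumed above are real):
- *
- *	cq->max_wqe = 1024;
- *	cq->dpi = my_dpi;		// a DPI from res->dpi_tbl
- *	cq->cnq_hw_ring_id = my_nq_ring_id;
- *	cq->sghead = NULL;		// kernel CQ: no user SG list
- *	cq->nmap = 0;
- *	rc = bnxt_qplib_create_cq(res, cq);
- */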
- int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
- {
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_destroy_cq req;
- struct creq_destroy_cq_resp resp;
- u16 cmd_flags = 0;
- int rc;
- RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
- req.cq_cid = cpu_to_le32(cq->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
- if (rc)
- return rc;
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
- return 0;
- }
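- /* Flush path: when a QP is moved to the error state, __flush_sq()
- * and __flush_rq() below fabricate FLUSHED_ERR completions for every
- * outstanding WQE between the queue's cons and prod, so the ULP still
- * sees a CQE for each work request that HW never completed.
- */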
- static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cqe **pcqe, int *budget)
- {
- u32 sw_prod, sw_cons;
- struct bnxt_qplib_cqe *cqe;
- int rc = 0;
- /* Now complete all outstanding SQEs with FLUSHED_ERR */
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- cqe = *pcqe;
- while (*budget) {
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
- if (sw_cons == sw_prod)
- break;
- /* Skip the FENCE WQE completions */
- if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
- bnxt_qplib_cancel_phantom_processing(qp);
- goto skip_compl;
- }
- memset(cqe, 0, sizeof(*cqe));
- cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
- cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
- cqe->qp_handle = (u64)(unsigned long)qp;
- cqe->wr_id = sq->swq[sw_cons].wr_id;
- cqe->src_qp = qp->id;
- cqe->type = sq->swq[sw_cons].type;
- cqe++;
- (*budget)--;
- skip_compl:
- sq->hwq.cons++;
- }
- *pcqe = cqe;
- if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
- /* Out of budget */
- rc = -EAGAIN;
- return rc;
- }
- static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cqe **pcqe, int *budget)
- {
- struct bnxt_qplib_cqe *cqe;
- u32 sw_prod, sw_cons;
- int rc = 0;
- int opcode = 0;
- switch (qp->type) {
- case CMDQ_CREATE_QP1_TYPE_GSI:
- opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
- break;
- case CMDQ_CREATE_QP_TYPE_RC:
- opcode = CQ_BASE_CQE_TYPE_RES_RC;
- break;
- case CMDQ_CREATE_QP_TYPE_UD:
- opcode = CQ_BASE_CQE_TYPE_RES_UD;
- break;
- }
- /* Flush the rest of the RQ */
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- cqe = *pcqe;
- while (*budget) {
- sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
- if (sw_cons == sw_prod)
- break;
- memset(cqe, 0, sizeof(*cqe));
- cqe->status =
- CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
- cqe->opcode = opcode;
- cqe->qp_handle = (u64)(unsigned long)qp;
- cqe->wr_id = rq->swq[sw_cons].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- }
- *pcqe = cqe;
- if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
- /* Out of budget */
- rc = -EAGAIN;
- return rc;
- }
- void bnxt_qplib_mark_qp_error(void *qp_handle)
- {
- struct bnxt_qplib_qp *qp = qp_handle;
- if (!qp)
- return;
- /* Must block new posting of SQ and RQ */
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- bnxt_qplib_cancel_phantom_processing(qp);
- /* Add qp to flush list of the CQ */
- __bnxt_qplib_add_flush_qp(qp);
- }
- /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
- * CQEs are tracked from sw_cq_cons to max_elements but are valid only
- * when VALID=1
- */
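- /* WA 9060 (phantom CQE workaround), as implemented below: when the
- * completed SQE carries the psn_search marker, completion is deferred
- * (-EAGAIN) and the CQ is armed; later CQEs are then peeked until the
- * REQ CQE for the fence WRID (the "phantom") arrives, after which
- * completions proceed one at a time (sq->single) until caught up.
- */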
- static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
- u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
- {
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_swq *swq;
- u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
- struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
- struct cq_req *peek_req_hwcqe;
- struct bnxt_qplib_qp *peek_qp;
- struct bnxt_qplib_q *peek_sq;
- int i, rc = 0;
- /* Normal mode */
- /* Check for the psn_search marking before completing */
- swq = &sq->swq[sw_sq_cons];
- if (swq->psn_search &&
- le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
- /* Unmark */
- swq->psn_search->flags_next_psn = cpu_to_le32
- (le32_to_cpu(swq->psn_search->flags_next_psn)
- & ~0x80000000);
- dev_dbg(&cq->hwq.pdev->dev,
- "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
- cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
- sq->condition = true;
- sq->send_phantom = true;
- /* TODO: Only ARM if the previous SQE is ARMALL */
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
- rc = -EAGAIN;
- goto out;
- }
- if (sq->condition) {
- /* Peek at the completions */
- peek_raw_cq_cons = cq->hwq.cons;
- peek_sw_cq_cons = cq_cons;
- i = cq->hwq.max_elements;
- while (i--) {
- peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
- peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
- [CQE_IDX(peek_sw_cq_cons)];
- /* If the next hwcqe is VALID */
- if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
- cq->hwq.max_elements)) {
- /*
- * The valid test of the entry must be done first before
- * reading any further.
- */
- dma_rmb();
- /* If the next hwcqe is a REQ */
- if ((peek_hwcqe->cqe_type_toggle &
- CQ_BASE_CQE_TYPE_MASK) ==
- CQ_BASE_CQE_TYPE_REQ) {
- peek_req_hwcqe = (struct cq_req *)
- peek_hwcqe;
- peek_qp = (struct bnxt_qplib_qp *)
- ((unsigned long)
- le64_to_cpu
- (peek_req_hwcqe->qp_handle));
- peek_sq = &peek_qp->sq;
- peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
- peek_req_hwcqe->sq_cons_idx) - 1,
- &sq->hwq);
- /* If the hwcqe's sq's wr_id matches */
- if (peek_sq == sq &&
- sq->swq[peek_sq_cons_idx].wr_id ==
- BNXT_QPLIB_FENCE_WRID) {
- /*
- * Unbreak only if the phantom
- * comes back
- */
- dev_dbg(&cq->hwq.pdev->dev,
- "FP:Got Phantom CQE");
- sq->condition = false;
- sq->single = true;
- rc = 0;
- goto out;
- }
- }
- /* Valid but not the phantom, so keep looping */
- } else {
- /* Not valid yet, just exit and wait */
- rc = -EINVAL;
- goto out;
- }
- peek_sw_cq_cons++;
- peek_raw_cq_cons++;
- }
- dev_err(&cq->hwq.pdev->dev,
- "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
- cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
- rc = -EINVAL;
- }
- out:
- return rc;
- }
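- /* The bnxt_qplib_cq_process_*() handlers below share one contract:
- * *pcqe points at the next free slot of the caller's CQE array and is
- * advanced as completions are fabricated, *budget is decremented per
- * emitted CQE, and -EAGAIN means the handler ran out of budget before
- * draining its queue.
- */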
- static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
- struct cq_req *hwcqe,
- struct bnxt_qplib_cqe **pcqe, int *budget,
- u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
- {
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *sq;
- struct bnxt_qplib_cqe *cqe;
- u32 sw_sq_cons, cqe_sq_cons;
- struct bnxt_qplib_swq *swq;
- int rc = 0;
- qp = (struct bnxt_qplib_qp *)((unsigned long)
- le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: Process Req qp is NULL");
- return -EINVAL;
- }
- sq = &qp->sq;
- cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
- if (cqe_sq_cons > sq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process req reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
- cqe_sq_cons, sq->hwq.max_elements);
- return -EINVAL;
- }
- if (qp->sq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- goto done;
- }
- /* We must walk the sq's swq and fabricate CQEs for all previously
- * signaled SWQEs (the HW aggregates CQEs) from the current sq cons
- * up to cqe_sq_cons
- */
- cqe = *pcqe;
- while (*budget) {
- sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
- if (sw_sq_cons == cqe_sq_cons)
- /* Done */
- break;
- swq = &sq->swq[sw_sq_cons];
- memset(cqe, 0, sizeof(*cqe));
- cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
- cqe->qp_handle = (u64)(unsigned long)qp;
- cqe->src_qp = qp->id;
- cqe->wr_id = swq->wr_id;
- if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
- goto skip;
- cqe->type = swq->type;
- /* For the last CQE, check for status. For errors, regardless
- * of the request being signaled or not, it must complete with
- * the hwcqe error status
- */
- if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
- hwcqe->status != CQ_REQ_STATUS_OK) {
- cqe->status = hwcqe->status;
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed Req ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
- sw_sq_cons, cqe->wr_id, cqe->status);
- cqe++;
- (*budget)--;
- bnxt_qplib_lock_buddy_cq(qp, cq);
- bnxt_qplib_mark_qp_error(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
- } else {
- if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
- /* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
- }
- cqe->status = CQ_REQ_STATUS_OK;
- cqe++;
- (*budget)--;
- }
- }
- skip:
- sq->hwq.cons++;
- if (sq->single)
- break;
- }
- out:
- *pcqe = cqe;
- if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
- /* Out of budget */
- rc = -EAGAIN;
- goto done;
- }
- /*
- * Back to normal completion mode only after it has completed all of
- * the WC for this CQE
- */
- sq->single = false;
- done:
- return rc;
- }
- static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
- struct cq_res_rc *hwcqe,
- struct bnxt_qplib_cqe **pcqe,
- int *budget)
- {
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
- struct bnxt_qplib_cqe *cqe;
- u32 wr_id_idx;
- int rc = 0;
- qp = (struct bnxt_qplib_qp *)((unsigned long)
- le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
- return -EINVAL;
- }
- if (qp->rq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- goto done;
- }
- cqe = *pcqe;
- cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->length = le32_to_cpu(hwcqe->length);
- cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
- cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
- cqe->flags = le16_to_cpu(hwcqe->flags);
- cqe->status = hwcqe->status;
- cqe->qp_handle = (u64)(unsigned long)qp;
- wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
- CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
- }
- done:
- return rc;
- }
- static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
- struct cq_res_ud *hwcqe,
- struct bnxt_qplib_cqe **pcqe,
- int *budget)
- {
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
- struct bnxt_qplib_cqe *cqe;
- u32 wr_id_idx;
- int rc = 0;
- qp = (struct bnxt_qplib_qp *)((unsigned long)
- le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
- return -EINVAL;
- }
- if (qp->rq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- goto done;
- }
- cqe = *pcqe;
- cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->length = le32_to_cpu(hwcqe->length);
- cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
- cqe->flags = le16_to_cpu(hwcqe->flags);
- cqe->status = hwcqe->status;
- cqe->qp_handle = (u64)(unsigned long)qp;
- memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
- wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
- & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
- cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
- ((le32_to_cpu(
- hwcqe->src_qp_high_srq_or_rq_wr_id) &
- CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx %#x exceeded RQ max %#x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
- }
- done:
- return rc;
- }
- bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
- {
- struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
- u32 sw_cons, raw_cons;
- bool rc = true;
- spin_lock_irqsave(&cq->hwq.lock, flags);
- raw_cons = cq->hwq.cons;
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
- /* Check for Valid bit. If the CQE is valid, return false */
- rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
- return rc;
- }
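- /* CQE_CMP_VALID() implements a toggle-valid scheme: HW flips the
- * expected valid bit on every wrap of the ring, so an entry is valid
- * only when its toggle bit matches the phase derived from the raw
- * (unmasked) consumer index - hence raw_cons, not sw_cons, is passed.
- */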
- static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
- struct cq_res_raweth_qp1 *hwcqe,
- struct bnxt_qplib_cqe **pcqe,
- int *budget)
- {
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
- struct bnxt_qplib_cqe *cqe;
- u32 wr_id_idx;
- int rc = 0;
- qp = (struct bnxt_qplib_qp *)((unsigned long)
- le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq Raw/QP1 qp is NULL");
- return -EINVAL;
- }
- if (qp->rq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- goto done;
- }
- cqe = *pcqe;
- cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->flags = le16_to_cpu(hwcqe->flags);
- cqe->qp_handle = (u64)(unsigned long)qp;
- wr_id_idx =
- le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
- & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
- cqe->src_qp = qp->id;
- cqe->length = le16_to_cpu(hwcqe->length);
- /* Workaround for HW misreporting a zero length on QP1 */
- if (qp->id == 1 && !cqe->length)
- cqe->length = 296;
- cqe->pkey_index = qp->pkey_index;
- memcpy(cqe->smac, qp->smac, ETH_ALEN);
- cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
- cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
- cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
- }
- done:
- return rc;
- }
- static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
- struct cq_terminal *hwcqe,
- struct bnxt_qplib_cqe **pcqe,
- int *budget)
- {
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *sq, *rq;
- struct bnxt_qplib_cqe *cqe;
- u32 sw_cons = 0, cqe_cons;
- int rc = 0;
- /* Check the Status */
- if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
- dev_warn(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
- hwcqe->status);
- qp = (struct bnxt_qplib_qp *)((unsigned long)
- le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal qp is NULL");
- return -EINVAL;
- }
- /* Must block new posting of SQ and RQ */
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- sq = &qp->sq;
- rq = &qp->rq;
- cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
- if (cqe_cons == 0xFFFF)
- goto do_rq;
- if (cqe_cons > sq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
- cqe_cons, sq->hwq.max_elements);
- goto do_rq;
- }
- if (qp->sq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- goto sq_done;
- }
- /* A terminal CQE can also aggregate successful completions that
- * preceded it, so we must complete all CQEs from the current sq
- * cons up to cqe_cons with status OK
- */
- cqe = *pcqe;
- while (*budget) {
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
- if (sw_cons == cqe_cons)
- break;
- if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
- memset(cqe, 0, sizeof(*cqe));
- cqe->status = CQ_REQ_STATUS_OK;
- cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
- cqe->qp_handle = (u64)(unsigned long)qp;
- cqe->src_qp = qp->id;
- cqe->wr_id = sq->swq[sw_cons].wr_id;
- cqe->type = sq->swq[sw_cons].type;
- cqe++;
- (*budget)--;
- }
- sq->hwq.cons++;
- }
- *pcqe = cqe;
- if (!(*budget) && sw_cons != cqe_cons) {
- /* Out of budget */
- rc = -EAGAIN;
- goto sq_done;
- }
- sq_done:
- if (rc)
- return rc;
- do_rq:
- cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
- if (cqe_cons == 0xFFFF) {
- goto done;
- } else if (cqe_cons > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed terminal ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
- cqe_cons, rq->hwq.max_elements);
- goto done;
- }
- if (qp->rq.flushed) {
- dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
- rc = 0;
- goto done;
- }
- /* A terminal CQE requires all posted RQEs to be completed with
- * FLUSHED_ERR from the current rq->cons to the rq->prod, regardless
- * of the rq_cons_idx the terminal CQE indicates
- */
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
- done:
- return rc;
- }
- static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
- struct cq_cutoff *hwcqe)
- {
- /* Check the Status */
- if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
- hwcqe->status);
- return -EINVAL;
- }
- clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
- wake_up_interruptible(&cq->waitq);
- return 0;
- }
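- /* A cutoff CQE marks the point where HW stops posting to this CQ,
- * e.g. while a resize is in progress; clearing
- * CQ_FLAGS_RESIZE_IN_PROG and waking cq->waitq lets the thread
- * driving the resize continue.
- */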
- int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
- struct bnxt_qplib_cqe *cqe,
- int num_cqes)
- {
- struct bnxt_qplib_qp *qp = NULL;
- int budget = num_cqes;
- unsigned long flags;
- spin_lock_irqsave(&cq->hwq.lock, flags);
- list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing SQ QP= %p",
- qp);
- __flush_sq(&qp->sq, qp, &cqe, &budget);
- }
- list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing RQ QP= %p",
- qp);
- __flush_rq(&qp->rq, qp, &cqe, &budget);
- }
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
- return num_cqes - budget;
- }
- int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
- int num_cqes, struct bnxt_qplib_qp **lib_qp)
- {
- struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
- u32 sw_cons, raw_cons;
- int budget, rc = 0;
- spin_lock_irqsave(&cq->hwq.lock, flags);
- raw_cons = cq->hwq.cons;
- budget = num_cqes;
- while (budget) {
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
- /* Check for Valid bit */
- if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
- break;
- /*
- * The valid test of the entry must be done first before
- * reading any further.
- */
- dma_rmb();
- /* Translate the device's CQE format into a qplib_wc */
- switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
- case CQ_BASE_CQE_TYPE_REQ:
- rc = bnxt_qplib_cq_process_req(cq,
- (struct cq_req *)hw_cqe,
- &cqe, &budget,
- sw_cons, lib_qp);
- break;
- case CQ_BASE_CQE_TYPE_RES_RC:
- rc = bnxt_qplib_cq_process_res_rc(cq,
- (struct cq_res_rc *)
- hw_cqe, &cqe,
- &budget);
- break;
- case CQ_BASE_CQE_TYPE_RES_UD:
- rc = bnxt_qplib_cq_process_res_ud
- (cq, (struct cq_res_ud *)hw_cqe, &cqe,
- &budget);
- break;
- case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
- rc = bnxt_qplib_cq_process_res_raweth_qp1
- (cq, (struct cq_res_raweth_qp1 *)
- hw_cqe, &cqe, &budget);
- break;
- case CQ_BASE_CQE_TYPE_TERMINAL:
- rc = bnxt_qplib_cq_process_terminal
- (cq, (struct cq_terminal *)hw_cqe,
- &cqe, &budget);
- break;
- case CQ_BASE_CQE_TYPE_CUT_OFF:
- bnxt_qplib_cq_process_cutoff
- (cq, (struct cq_cutoff *)hw_cqe);
- /* Done processing this CQ */
- goto exit;
- default:
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq unknown type 0x%lx",
- hw_cqe->cqe_type_toggle &
- CQ_BASE_CQE_TYPE_MASK);
- rc = -EINVAL;
- break;
- }
- if (rc < 0) {
- if (rc == -EAGAIN)
- break;
- /* Error while processing the CQE, just skip to the
- * next one
- */
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cqe error rc = 0x%x", rc);
- }
- raw_cons++;
- }
- if (cq->hwq.cons != raw_cons) {
- cq->hwq.cons = raw_cons;
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
- }
- exit:
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
- return num_cqes - budget;
- }
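- /* A minimal polling sketch (the cqes array size of 16 is arbitrary;
- * the bnxt_re verbs layer performs the equivalent loop and translates
- * the results into ib_wc entries):
- *
- *	struct bnxt_qplib_cqe cqes[16];
- *	struct bnxt_qplib_qp *lib_qp = NULL;
- *	int n = bnxt_qplib_poll_cq(cq, cqes, 16, &lib_qp);
- *
- * n is the number of slots filled; a non-NULL lib_qp on return flags
- * a QP held back by the WA 9060 logic in bnxt_qplib_cq_process_req()
- * that needs phantom-WQE handling by the caller.
- */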
- void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
- {
- unsigned long flags;
- spin_lock_irqsave(&cq->hwq.lock, flags);
- if (arm_type)
- bnxt_qplib_arm_cq(cq, arm_type);
- /* Using cq->arm_state variable to track whether to issue cq handler */
- atomic_set(&cq->arm_state, 1);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
- }
- void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
- {
- flush_workqueue(qp->scq->nq->cqn_wq);
- if (qp->scq != qp->rcq)
- flush_workqueue(qp->rcq->nq->cqn_wq);
- }