qede_main.c

  1. /* QLogic qede NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/version.h>
  11. #include <linux/device.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/etherdevice.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/errno.h>
  16. #include <linux/list.h>
  17. #include <linux/string.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/interrupt.h>
  20. #include <asm/byteorder.h>
  21. #include <asm/param.h>
  22. #include <linux/io.h>
  23. #include <linux/netdev_features.h>
  24. #include <linux/udp.h>
  25. #include <linux/tcp.h>
  26. #include <net/udp_tunnel.h>
  27. #include <linux/ip.h>
  28. #include <net/ipv6.h>
  29. #include <net/tcp.h>
  30. #include <linux/if_ether.h>
  31. #include <linux/if_vlan.h>
  32. #include <linux/pkt_sched.h>
  33. #include <linux/ethtool.h>
  34. #include <linux/in.h>
  35. #include <linux/random.h>
  36. #include <net/ip6_checksum.h>
  37. #include <linux/bitops.h>
  38. #include "qede.h"
  39. static char version[] =
  40. "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
  41. MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
  42. MODULE_LICENSE("GPL");
  43. MODULE_VERSION(DRV_MODULE_VERSION);
  44. static uint debug;
  45. module_param(debug, uint, 0);
  46. MODULE_PARM_DESC(debug, " Default debug msglevel");
  47. static const struct qed_eth_ops *qed_ops;
  48. #define CHIP_NUM_57980S_40 0x1634
  49. #define CHIP_NUM_57980S_10 0x1666
  50. #define CHIP_NUM_57980S_MF 0x1636
  51. #define CHIP_NUM_57980S_100 0x1644
  52. #define CHIP_NUM_57980S_50 0x1654
  53. #define CHIP_NUM_57980S_25 0x1656
  54. #define CHIP_NUM_57980S_IOV 0x1664
  55. #ifndef PCI_DEVICE_ID_NX2_57980E
  56. #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
  57. #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
  58. #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
  59. #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
  60. #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
  61. #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
  62. #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
  63. #endif
  64. enum qede_pci_private {
  65. QEDE_PRIVATE_PF,
  66. QEDE_PRIVATE_VF
  67. };
  68. static const struct pci_device_id qede_pci_tbl[] = {
  69. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
  70. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
  71. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
  72. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
  73. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
  74. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
  75. #ifdef CONFIG_QED_SRIOV
  76. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
  77. #endif
  78. { 0 }
  79. };
  80. MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
  81. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  82. #define TX_TIMEOUT (5 * HZ)
  83. static void qede_remove(struct pci_dev *pdev);
  84. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  85. struct qede_rx_queue *rxq);
  86. static void qede_link_update(void *dev, struct qed_link_output *link);
  87. #ifdef CONFIG_QED_SRIOV
  88. static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
  89. {
  90. struct qede_dev *edev = netdev_priv(ndev);
  91. if (vlan > 4095) {
  92. DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
  93. return -EINVAL;
  94. }
  95. DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
  96. vlan, vf);
  97. return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
  98. }
  99. static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
  100. {
  101. struct qede_dev *edev = netdev_priv(ndev);
  102. DP_VERBOSE(edev, QED_MSG_IOV,
  103. "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
  104. mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
  105. if (!is_valid_ether_addr(mac)) {
  106. DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
  107. return -EINVAL;
  108. }
  109. return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
  110. }
  111. static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
  112. {
  113. struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
  114. struct qed_dev_info *qed_info = &edev->dev_info.common;
  115. int rc;
  116. DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
  117. rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
  118. /* Enable/Disable Tx switching for PF */
  119. if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
  120. qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
  121. struct qed_update_vport_params params;
  122. memset(&params, 0, sizeof(params));
  123. params.vport_id = 0;
  124. params.update_tx_switching_flg = 1;
  125. params.tx_switching_flg = num_vfs_param ? 1 : 0;
  126. edev->ops->vport_update(edev->cdev, &params);
  127. }
  128. return rc;
  129. }
  130. #endif
  131. static struct pci_driver qede_pci_driver = {
  132. .name = "qede",
  133. .id_table = qede_pci_tbl,
  134. .probe = qede_probe,
  135. .remove = qede_remove,
  136. #ifdef CONFIG_QED_SRIOV
  137. .sriov_configure = qede_sriov_configure,
  138. #endif
  139. };
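/* Callback invoked through qed_ll_ops when the qed core needs the PF to adopt
 * a new primary MAC (e.g. one forced by management firmware): the address is
 * mirrored into both the netdev and the driver's primary_mac copy.
 */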
  140. static void qede_force_mac(void *dev, u8 *mac)
  141. {
  142. struct qede_dev *edev = dev;
  143. ether_addr_copy(edev->ndev->dev_addr, mac);
  144. ether_addr_copy(edev->primary_mac, mac);
  145. }
  146. static struct qed_eth_cb_ops qede_ll_ops = {
  147. {
  148. .link_update = qede_link_update,
  149. },
  150. .force_mac = qede_force_mac,
  151. };
  152. static int qede_netdev_event(struct notifier_block *this, unsigned long event,
  153. void *ptr)
  154. {
  155. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  156. struct ethtool_drvinfo drvinfo;
  157. struct qede_dev *edev;
  158. /* Currently only support name change */
  159. if (event != NETDEV_CHANGENAME)
  160. goto done;
  161. /* Check whether this is a qede device */
  162. if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
  163. goto done;
  164. memset(&drvinfo, 0, sizeof(drvinfo));
  165. ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
  166. if (strcmp(drvinfo.driver, "qede"))
  167. goto done;
  168. edev = netdev_priv(ndev);
  169. /* Notify qed of the name change */
  170. if (!edev->ops || !edev->ops->common)
  171. goto done;
  172. edev->ops->common->set_id(edev->cdev, edev->ndev->name,
  173. "qede");
  174. done:
  175. return NOTIFY_DONE;
  176. }
  177. static struct notifier_block qede_netdev_notifier = {
  178. .notifier_call = qede_netdev_event,
  179. };
  180. static
  181. int __init qede_init(void)
  182. {
  183. int ret;
  184. pr_info("qede_init: %s\n", version);
  185. qed_ops = qed_get_eth_ops();
  186. if (!qed_ops) {
  187. pr_notice("Failed to get qed ethtool operations\n");
  188. return -EINVAL;
  189. }
  190. /* Must register notifier before pci ops, since we might miss
  191. * interface rename after pci probe and netdev registration.
  192. */
  193. ret = register_netdevice_notifier(&qede_netdev_notifier);
  194. if (ret) {
  195. pr_notice("Failed to register netdevice_notifier\n");
  196. qed_put_eth_ops();
  197. return -EINVAL;
  198. }
  199. ret = pci_register_driver(&qede_pci_driver);
  200. if (ret) {
  201. pr_notice("Failed to register driver\n");
  202. unregister_netdevice_notifier(&qede_netdev_notifier);
  203. qed_put_eth_ops();
  204. return -EINVAL;
  205. }
  206. return 0;
  207. }
  208. static void __exit qede_cleanup(void)
  209. {
  210. if (debug & QED_LOG_INFO_MASK)
  211. pr_info("qede_cleanup called\n");
  212. unregister_netdevice_notifier(&qede_netdev_notifier);
  213. pci_unregister_driver(&qede_pci_driver);
  214. qed_put_eth_ops();
  215. }
  216. module_init(qede_init);
  217. module_exit(qede_cleanup);
  218. /* -------------------------------------------------------------------------
  219. * START OF FAST-PATH
  220. * -------------------------------------------------------------------------
  221. */
  222. /* Unmap the data and free skb */
  223. static int qede_free_tx_pkt(struct qede_dev *edev,
  224. struct qede_tx_queue *txq, int *len)
  225. {
  226. u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
  227. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  228. struct eth_tx_1st_bd *first_bd;
  229. struct eth_tx_bd *tx_data_bd;
  230. int bds_consumed = 0;
  231. int nbds;
  232. bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
  233. int i, split_bd_len = 0;
  234. if (unlikely(!skb)) {
  235. DP_ERR(edev,
  236. "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
  237. idx, txq->sw_tx_cons, txq->sw_tx_prod);
  238. return -1;
  239. }
  240. *len = skb->len;
  241. first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
  242. bds_consumed++;
  243. nbds = first_bd->data.nbds;
  244. if (data_split) {
  245. struct eth_tx_bd *split = (struct eth_tx_bd *)
  246. qed_chain_consume(&txq->tx_pbl);
  247. split_bd_len = BD_UNMAP_LEN(split);
  248. bds_consumed++;
  249. }
  250. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  251. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  252. /* Unmap the data of the skb frags */
  253. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
  254. tx_data_bd = (struct eth_tx_bd *)
  255. qed_chain_consume(&txq->tx_pbl);
  256. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
  257. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  258. }
  259. while (bds_consumed++ < nbds)
  260. qed_chain_consume(&txq->tx_pbl);
  261. /* Free skb */
  262. dev_kfree_skb_any(skb);
  263. txq->sw_tx_ring[idx].skb = NULL;
  264. txq->sw_tx_ring[idx].flags = 0;
  265. return 0;
  266. }
  267. /* Unmap the data and free skb when mapping failed during start_xmit */
  268. static void qede_free_failed_tx_pkt(struct qede_dev *edev,
  269. struct qede_tx_queue *txq,
  270. struct eth_tx_1st_bd *first_bd,
  271. int nbd, bool data_split)
  272. {
  273. u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  274. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  275. struct eth_tx_bd *tx_data_bd;
  276. int i, split_bd_len = 0;
  277. /* Return prod to its position before this skb was handled */
  278. qed_chain_set_prod(&txq->tx_pbl,
  279. le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
  280. first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
  281. if (data_split) {
  282. struct eth_tx_bd *split = (struct eth_tx_bd *)
  283. qed_chain_produce(&txq->tx_pbl);
  284. split_bd_len = BD_UNMAP_LEN(split);
  285. nbd--;
  286. }
  287. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  288. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  289. /* Unmap the data of the skb frags */
  290. for (i = 0; i < nbd; i++) {
  291. tx_data_bd = (struct eth_tx_bd *)
  292. qed_chain_produce(&txq->tx_pbl);
  293. if (tx_data_bd->nbytes)
  294. dma_unmap_page(&edev->pdev->dev,
  295. BD_UNMAP_ADDR(tx_data_bd),
  296. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  297. }
  298. /* Return again prod to its position before this skb was handled */
  299. qed_chain_set_prod(&txq->tx_pbl,
  300. le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
  301. /* Free skb */
  302. dev_kfree_skb_any(skb);
  303. txq->sw_tx_ring[idx].skb = NULL;
  304. txq->sw_tx_ring[idx].flags = 0;
  305. }
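/* Classify the offloads an skb needs on transmit: returns XMIT_PLAIN when no
 * checksum offload is requested, otherwise XMIT_L4_CSUM possibly OR'd with
 * XMIT_ENC (tunnel encapsulation) and XMIT_LSO (GSO); IPv6 extension headers
 * are flagged through *ipv6_ext.
 */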
  306. static u32 qede_xmit_type(struct qede_dev *edev,
  307. struct sk_buff *skb, int *ipv6_ext)
  308. {
  309. u32 rc = XMIT_L4_CSUM;
  310. __be16 l3_proto;
  311. if (skb->ip_summed != CHECKSUM_PARTIAL)
  312. return XMIT_PLAIN;
  313. l3_proto = vlan_get_protocol(skb);
  314. if (l3_proto == htons(ETH_P_IPV6) &&
  315. (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
  316. *ipv6_ext = 1;
  317. if (skb->encapsulation)
  318. rc |= XMIT_ENC;
  319. if (skb_is_gso(skb))
  320. rc |= XMIT_LSO;
  321. return rc;
  322. }
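/* For IPv6 packets with extension headers the device parser can't locate the
 * L4 header on its own, so the 2nd/3rd BDs explicitly carry the L4 header
 * offset (in words), the pseudo-checksum mode, a UDP indication and the TCP
 * header length.
 */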
  323. static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
  324. struct eth_tx_2nd_bd *second_bd,
  325. struct eth_tx_3rd_bd *third_bd)
  326. {
  327. u8 l4_proto;
  328. u16 bd2_bits1 = 0, bd2_bits2 = 0;
  329. bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
  330. bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
  331. ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
  332. << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
  333. bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
  334. ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
  335. if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
  336. l4_proto = ipv6_hdr(skb)->nexthdr;
  337. else
  338. l4_proto = ip_hdr(skb)->protocol;
  339. if (l4_proto == IPPROTO_UDP)
  340. bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
  341. if (third_bd)
  342. third_bd->data.bitfields |=
  343. cpu_to_le16(((tcp_hdrlen(skb) / 4) &
  344. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
  345. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
  346. second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
  347. second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
  348. }
  349. static int map_frag_to_bd(struct qede_dev *edev,
  350. skb_frag_t *frag, struct eth_tx_bd *bd)
  351. {
  352. dma_addr_t mapping;
  353. /* Map skb non-linear frag data for DMA */
  354. mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
  355. skb_frag_size(frag), DMA_TO_DEVICE);
  356. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  357. DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
  358. return -ENOMEM;
  359. }
  360. /* Setup the data pointer of the frag data */
  361. BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
  362. return 0;
  363. }
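/* Total header length, up to and including the TCP header, used to decide
 * whether the linear part of the skb must be split for LSO; for encapsulated
 * packets the inner headers are used.
 */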
  364. static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
  365. {
  366. if (is_encap_pkt)
  367. return (skb_inner_transport_header(skb) +
  368. inner_tcp_hdrlen(skb) - skb->data);
  369. else
  370. return (skb_transport_header(skb) +
  371. tcp_hdrlen(skb) - skb->data);
  372. }
  373. /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
  374. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  375. static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
  376. u8 xmit_type)
  377. {
  378. int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
  379. if (xmit_type & XMIT_LSO) {
  380. int hlen;
  381. hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
  382. /* linear payload would require its own BD */
  383. if (skb_headlen(skb) > hlen)
  384. allowed_frags--;
  385. }
  386. return (skb_shinfo(skb)->nr_frags > allowed_frags);
  387. }
  388. #endif
  389. static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
  390. {
  391. /* wmb makes sure that the BD data is updated before updating the
  392. * producer, otherwise FW may read old data from the BDs.
  393. */
  394. wmb();
  395. barrier();
  396. writel(txq->tx_db.raw, txq->doorbell_addr);
  397. /* mmiowb is needed to synchronize doorbell writes from more than one
  398. * processor. It guarantees that the write arrives at the device before
  399. * the queue lock is released and another start_xmit is called (possibly
  400. * on another CPU). Without this barrier, the next doorbell can bypass
  401. * this doorbell. This is applicable to IA64/Altix systems.
  402. */
  403. mmiowb();
  404. }
  405. /* Main transmit function */
  406. static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
  407. struct net_device *ndev)
  408. {
  409. struct qede_dev *edev = netdev_priv(ndev);
  410. struct netdev_queue *netdev_txq;
  411. struct qede_tx_queue *txq;
  412. struct eth_tx_1st_bd *first_bd;
  413. struct eth_tx_2nd_bd *second_bd = NULL;
  414. struct eth_tx_3rd_bd *third_bd = NULL;
  415. struct eth_tx_bd *tx_data_bd = NULL;
  416. u16 txq_index;
  417. u8 nbd = 0;
  418. dma_addr_t mapping;
  419. int rc, frag_idx = 0, ipv6_ext = 0;
  420. u8 xmit_type;
  421. u16 idx;
  422. u16 hlen;
  423. bool data_split = false;
  424. /* Get tx-queue context and netdev index */
  425. txq_index = skb_get_queue_mapping(skb);
  426. WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
  427. txq = QEDE_TX_QUEUE(edev, txq_index);
  428. netdev_txq = netdev_get_tx_queue(ndev, txq_index);
  429. WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
  430. xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
  431. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  432. if (qede_pkt_req_lin(edev, skb, xmit_type)) {
  433. if (skb_linearize(skb)) {
  434. DP_NOTICE(edev,
  435. "SKB linearization failed - silently dropping this SKB\n");
  436. dev_kfree_skb_any(skb);
  437. return NETDEV_TX_OK;
  438. }
  439. }
  440. #endif
  441. /* Fill the entry in the SW ring and the BDs in the FW ring */
  442. idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  443. txq->sw_tx_ring[idx].skb = skb;
  444. first_bd = (struct eth_tx_1st_bd *)
  445. qed_chain_produce(&txq->tx_pbl);
  446. memset(first_bd, 0, sizeof(*first_bd));
  447. first_bd->data.bd_flags.bitfields =
  448. 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
  449. /* Map skb linear data for DMA and set in the first BD */
  450. mapping = dma_map_single(&edev->pdev->dev, skb->data,
  451. skb_headlen(skb), DMA_TO_DEVICE);
  452. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  453. DP_NOTICE(edev, "SKB mapping failed\n");
  454. qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
  455. qede_update_tx_producer(txq);
  456. return NETDEV_TX_OK;
  457. }
  458. nbd++;
  459. BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
  460. /* In case there is IPv6 with extension headers or LSO we need 2nd and
  461. * 3rd BDs.
  462. */
  463. if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
  464. second_bd = (struct eth_tx_2nd_bd *)
  465. qed_chain_produce(&txq->tx_pbl);
  466. memset(second_bd, 0, sizeof(*second_bd));
  467. nbd++;
  468. third_bd = (struct eth_tx_3rd_bd *)
  469. qed_chain_produce(&txq->tx_pbl);
  470. memset(third_bd, 0, sizeof(*third_bd));
  471. nbd++;
  472. /* We need to fill in additional data in second_bd... */
  473. tx_data_bd = (struct eth_tx_bd *)second_bd;
  474. }
  475. if (skb_vlan_tag_present(skb)) {
  476. first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
  477. first_bd->data.bd_flags.bitfields |=
  478. 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
  479. }
  480. /* Fill the parsing flags & params according to the requested offload */
  481. if (xmit_type & XMIT_L4_CSUM) {
  482. /* We don't re-calculate IP checksum as it is already done by
  483. * the upper stack
  484. */
  485. first_bd->data.bd_flags.bitfields |=
  486. 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
  487. if (xmit_type & XMIT_ENC) {
  488. first_bd->data.bd_flags.bitfields |=
  489. 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
  490. first_bd->data.bitfields |=
  491. 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
  492. }
  493. /* Legacy FW had flipped behavior with regard to this bit -
  494. * i.e., the bit needed to be set to prevent FW from touching
  495. * encapsulated packets when it didn't need to.
  496. */
  497. if (unlikely(txq->is_legacy))
  498. first_bd->data.bitfields ^=
  499. 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
  500. /* If the packet is IPv6 with extension header, indicate that
  501. * to FW and pass a few params, since the device cracker doesn't
  502. * support parsing IPv6 with extension header(s).
  503. */
  504. if (unlikely(ipv6_ext))
  505. qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
  506. }
  507. if (xmit_type & XMIT_LSO) {
  508. first_bd->data.bd_flags.bitfields |=
  509. (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
  510. third_bd->data.lso_mss =
  511. cpu_to_le16(skb_shinfo(skb)->gso_size);
  512. if (unlikely(xmit_type & XMIT_ENC)) {
  513. first_bd->data.bd_flags.bitfields |=
  514. 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
  515. hlen = qede_get_skb_hlen(skb, true);
  516. } else {
  517. first_bd->data.bd_flags.bitfields |=
  518. 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
  519. hlen = qede_get_skb_hlen(skb, false);
  520. }
  521. /* @@@TBD - check whether this is still needed if it is not removed */
  522. third_bd->data.bitfields |=
  523. cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
  524. /* Make life easier for FW guys who can't deal with header and
  525. * data on same BD. If we need to split, use the second bd...
  526. */
  527. if (unlikely(skb_headlen(skb) > hlen)) {
  528. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  529. "TSO split header size is %d (%x:%x)\n",
  530. first_bd->nbytes, first_bd->addr.hi,
  531. first_bd->addr.lo);
  532. mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
  533. le32_to_cpu(first_bd->addr.lo)) +
  534. hlen;
  535. BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
  536. le16_to_cpu(first_bd->nbytes) -
  537. hlen);
  538. /* this marks the BD as one that has no
  539. * individual mapping
  540. */
  541. txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
  542. first_bd->nbytes = cpu_to_le16(hlen);
  543. tx_data_bd = (struct eth_tx_bd *)third_bd;
  544. data_split = true;
  545. }
  546. } else {
  547. first_bd->data.bitfields |=
  548. (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
  549. ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
  550. }
  551. /* Handle fragmented skb */
  552. /* Special handling for frags placed in the 2nd and 3rd BDs */
  553. while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
  554. rc = map_frag_to_bd(edev,
  555. &skb_shinfo(skb)->frags[frag_idx],
  556. tx_data_bd);
  557. if (rc) {
  558. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  559. data_split);
  560. qede_update_tx_producer(txq);
  561. return NETDEV_TX_OK;
  562. }
  563. if (tx_data_bd == (struct eth_tx_bd *)second_bd)
  564. tx_data_bd = (struct eth_tx_bd *)third_bd;
  565. else
  566. tx_data_bd = NULL;
  567. frag_idx++;
  568. }
  569. /* Map the remaining frags into the 4th, 5th, ... BDs */
  570. for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
  571. tx_data_bd = (struct eth_tx_bd *)
  572. qed_chain_produce(&txq->tx_pbl);
  573. memset(tx_data_bd, 0, sizeof(*tx_data_bd));
  574. rc = map_frag_to_bd(edev,
  575. &skb_shinfo(skb)->frags[frag_idx],
  576. tx_data_bd);
  577. if (rc) {
  578. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  579. data_split);
  580. qede_update_tx_producer(txq);
  581. return NETDEV_TX_OK;
  582. }
  583. }
  584. /* update the first BD with the actual num BDs */
  585. first_bd->data.nbds = nbd;
  586. netdev_tx_sent_queue(netdev_txq, skb->len);
  587. skb_tx_timestamp(skb);
  588. /* Advance packet producer only before sending the packet since mapping
  589. * of pages may fail.
  590. */
  591. txq->sw_tx_prod++;
  592. /* 'next page' entries are counted in the producer value */
  593. txq->tx_db.data.bd_prod =
  594. cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
  595. if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
  596. qede_update_tx_producer(txq);
  597. if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
  598. < (MAX_SKB_FRAGS + 1))) {
  599. if (skb->xmit_more)
  600. qede_update_tx_producer(txq);
  601. netif_tx_stop_queue(netdev_txq);
  602. txq->stopped_cnt++;
  603. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  604. "Stop queue was called\n");
  605. /* paired memory barrier is in qede_tx_int(), we have to keep
  606. * ordering of set_bit() in netif_tx_stop_queue() and read of
  607. * fp->bd_tx_cons
  608. */
  609. smp_mb();
  610. if (qed_chain_get_elem_left(&txq->tx_pbl)
  611. >= (MAX_SKB_FRAGS + 1) &&
  612. (edev->state == QEDE_STATE_OPEN)) {
  613. netif_tx_wake_queue(netdev_txq);
  614. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  615. "Wake queue was called\n");
  616. }
  617. }
  618. return NETDEV_TX_OK;
  619. }
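/* Returns true when the HW consumer index differs from the driver's chain
 * consumer, i.e. there are TX completions waiting to be processed.
 */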
  620. int qede_txq_has_work(struct qede_tx_queue *txq)
  621. {
  622. u16 hw_bd_cons;
  623. /* Tell compiler that consumer and producer can change */
  624. barrier();
  625. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  626. if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
  627. return 0;
  628. return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
  629. }
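/* TX completion handler: walk completed BDs up to the HW consumer, unmap and
 * free the corresponding skbs, report the completed bytes/packets to BQL and
 * re-wake the queue if it was stopped and room is available again.
 */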
  630. static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
  631. {
  632. struct netdev_queue *netdev_txq;
  633. u16 hw_bd_cons;
  634. unsigned int pkts_compl = 0, bytes_compl = 0;
  635. int rc;
  636. netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
  637. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  638. barrier();
  639. while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
  640. int len = 0;
  641. rc = qede_free_tx_pkt(edev, txq, &len);
  642. if (rc) {
  643. DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
  644. hw_bd_cons,
  645. qed_chain_get_cons_idx(&txq->tx_pbl));
  646. break;
  647. }
  648. bytes_compl += len;
  649. pkts_compl++;
  650. txq->sw_tx_cons++;
  651. txq->xmit_pkts++;
  652. }
  653. netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
  654. /* Need to make the tx_bd_cons update visible to start_xmit()
  655. * before checking for netif_tx_queue_stopped(). Without the
  656. * memory barrier, there is a small possibility that
  657. * start_xmit() will miss it and cause the queue to be stopped
  658. * forever.
  659. * On the other hand we need an rmb() here to ensure the proper
  660. * ordering of bit testing in the following
  661. * netif_tx_queue_stopped(txq) call.
  662. */
  663. smp_mb();
  664. if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
  665. /* Taking tx_lock is needed to prevent re-enabling the queue
  666. * while it's empty. This could happen if rx_action() gets
  667. * suspended in qede_tx_int() after the condition before
  668. * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
  669. *
  670. * stops the queue->sees fresh tx_bd_cons->releases the queue->
  671. * sends some packets consuming the whole queue again->
  672. * stops the queue
  673. */
  674. __netif_tx_lock(netdev_txq, smp_processor_id());
  675. if ((netif_tx_queue_stopped(netdev_txq)) &&
  676. (edev->state == QEDE_STATE_OPEN) &&
  677. (qed_chain_get_elem_left(&txq->tx_pbl)
  678. >= (MAX_SKB_FRAGS + 1))) {
  679. netif_tx_wake_queue(netdev_txq);
  680. DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
  681. "Wake queue was called\n");
  682. }
  683. __netif_tx_unlock(netdev_txq);
  684. }
  685. return 0;
  686. }
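/* True when the HW completion-queue producer has advanced past our
 * completion-ring consumer, i.e. there are RX CQEs to process.
 */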
  687. bool qede_has_rx_work(struct qede_rx_queue *rxq)
  688. {
  689. u16 hw_comp_cons, sw_comp_cons;
  690. /* Tell compiler that status block fields can change */
  691. barrier();
  692. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  693. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  694. return hw_comp_cons != sw_comp_cons;
  695. }
  696. static bool qede_has_tx_work(struct qede_fastpath *fp)
  697. {
  698. u8 tc;
  699. for (tc = 0; tc < fp->edev->num_tc; tc++)
  700. if (qede_txq_has_work(&fp->txqs[tc]))
  701. return true;
  702. return false;
  703. }
  704. static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
  705. {
  706. qed_chain_consume(&rxq->rx_bd_ring);
  707. rxq->sw_rx_cons++;
  708. }
  709. /* This function reuses the buffer (from an offset) from the
  710. * consumer index to the producer index in the BD ring
  711. */
  712. static inline void qede_reuse_page(struct qede_dev *edev,
  713. struct qede_rx_queue *rxq,
  714. struct sw_rx_data *curr_cons)
  715. {
  716. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  717. struct sw_rx_data *curr_prod;
  718. dma_addr_t new_mapping;
  719. curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  720. *curr_prod = *curr_cons;
  721. new_mapping = curr_prod->mapping + curr_prod->page_offset;
  722. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
  723. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
  724. rxq->sw_rx_prod++;
  725. curr_cons->data = NULL;
  726. }
  727. /* In case of allocation failures, reuse buffers
  728. * from the consumer index to produce buffers for the firmware
  729. */
  730. void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
  731. struct qede_dev *edev, u8 count)
  732. {
  733. struct sw_rx_data *curr_cons;
  734. for (; count > 0; count--) {
  735. curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
  736. qede_reuse_page(edev, rxq, curr_cons);
  737. qede_rx_bd_ring_consume(rxq);
  738. }
  739. }
  740. static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
  741. struct qede_rx_queue *rxq,
  742. struct sw_rx_data *curr_cons)
  743. {
  744. /* Move to the next segment in the page */
  745. curr_cons->page_offset += rxq->rx_buf_seg_size;
  746. if (curr_cons->page_offset == PAGE_SIZE) {
  747. if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
  748. /* Since we failed to allocate a new buffer, the
  749. * current buffer can be used again.
  750. */
  751. curr_cons->page_offset -= rxq->rx_buf_seg_size;
  752. return -ENOMEM;
  753. }
  754. dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
  755. PAGE_SIZE, DMA_FROM_DEVICE);
  756. } else {
  757. /* Increment the refcount of the page as we don't want the
  758. * network stack to take ownership of a page which can be
  759. * recycled multiple times by the driver.
  760. */
  761. page_ref_inc(curr_cons->data);
  762. qede_reuse_page(edev, rxq, curr_cons);
  763. }
  764. return 0;
  765. }
  766. static inline void qede_update_rx_prod(struct qede_dev *edev,
  767. struct qede_rx_queue *rxq)
  768. {
  769. u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
  770. u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
  771. struct eth_rx_prod_data rx_prods = {0};
  772. /* Update producers */
  773. rx_prods.bd_prod = cpu_to_le16(bd_prod);
  774. rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
  775. /* Make sure that the BD and SGE data is updated before updating the
  776. * producers since FW might read the BD/SGE right after the producer
  777. * is updated.
  778. */
  779. wmb();
  780. internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
  781. (u32 *)&rx_prods);
  782. /* mmiowb is needed to synchronize doorbell writes from more than one
  783. * processor. It guarantees that the write arrives at the device before
  784. * the napi lock is released and another qede_poll is called (possibly
  785. * on another CPU). Without this barrier, the next doorbell can bypass
  786. * this doorbell. This is applicable to IA64/Altix systems.
  787. */
  788. mmiowb();
  789. }
  790. static u32 qede_get_rxhash(struct qede_dev *edev,
  791. u8 bitfields,
  792. __le32 rss_hash, enum pkt_hash_types *rxhash_type)
  793. {
  794. enum rss_hash_type htype;
  795. htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
  796. if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
  797. *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
  798. (htype == RSS_HASH_TYPE_IPV6)) ?
  799. PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
  800. return le32_to_cpu(rss_hash);
  801. }
  802. *rxhash_type = PKT_HASH_TYPE_NONE;
  803. return 0;
  804. }
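/* Translate the driver's internal checksum verdict (QEDE_CSUM_* flags) into
 * the skb checksum state, setting csum_level for the inner packet of a
 * validated tunnel.
 */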
  805. static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
  806. {
  807. skb_checksum_none_assert(skb);
  808. if (csum_flag & QEDE_CSUM_UNNECESSARY)
  809. skb->ip_summed = CHECKSUM_UNNECESSARY;
  810. if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
  811. skb->csum_level = 1;
  812. }
  813. static inline void qede_skb_receive(struct qede_dev *edev,
  814. struct qede_fastpath *fp,
  815. struct sk_buff *skb, u16 vlan_tag)
  816. {
  817. if (vlan_tag)
  818. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
  819. napi_gro_receive(&fp->napi, skb);
  820. }
  821. static void qede_set_gro_params(struct qede_dev *edev,
  822. struct sk_buff *skb,
  823. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  824. {
  825. u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
  826. if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
  827. PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
  828. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  829. else
  830. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  831. skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
  832. cqe->header_len;
  833. }
  834. static int qede_fill_frag_skb(struct qede_dev *edev,
  835. struct qede_rx_queue *rxq,
  836. u8 tpa_agg_index, u16 len_on_bd)
  837. {
  838. struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
  839. NUM_RX_BDS_MAX];
  840. struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
  841. struct sk_buff *skb = tpa_info->skb;
  842. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  843. goto out;
  844. /* Add one frag and update the appropriate fields in the skb */
  845. skb_fill_page_desc(skb, tpa_info->frag_id++,
  846. current_bd->data, current_bd->page_offset,
  847. len_on_bd);
  848. if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
  849. /* Incr page ref count to reuse on allocation failure
  850. * so that it doesn't get freed while freeing SKB.
  851. */
  852. page_ref_inc(current_bd->data);
  853. goto out;
  854. }
  855. qed_chain_consume(&rxq->rx_bd_ring);
  856. rxq->sw_rx_cons++;
  857. skb->data_len += len_on_bd;
  858. skb->truesize += rxq->rx_buf_seg_size;
  859. skb->len += len_on_bd;
  860. return 0;
  861. out:
  862. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  863. qede_recycle_rx_bd_ring(rxq, edev, 1);
  864. return -ENOMEM;
  865. }
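/* TPA (HW GRO) aggregation start: immediately re-post the pre-allocated
 * replacement buffer so the RX BD ring never runs dry, stash the first buffer
 * and its mapping, allocate the aggregation skb and record the start-CQE
 * metadata (hash, VLAN, GRO params). If the skb allocation fails, the
 * aggregation is marked as errored and will be dropped at TPA end.
 */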
  866. static void qede_tpa_start(struct qede_dev *edev,
  867. struct qede_rx_queue *rxq,
  868. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  869. {
  870. struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  871. struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
  872. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  873. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  874. dma_addr_t mapping = tpa_info->replace_buf_mapping;
  875. struct sw_rx_data *sw_rx_data_cons;
  876. struct sw_rx_data *sw_rx_data_prod;
  877. enum pkt_hash_types rxhash_type;
  878. u32 rxhash;
  879. sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
  880. sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  881. /* Use the pre-allocated replacement buffer - we can't release the agg.
  882. * start buffer until the aggregation is over, and we don't want to risk
  883. * an allocation failure here, so re-allocate once the aggregation ends.
  884. */
  885. sw_rx_data_prod->mapping = replace_buf->mapping;
  886. sw_rx_data_prod->data = replace_buf->data;
  887. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  888. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  889. sw_rx_data_prod->page_offset = replace_buf->page_offset;
  890. rxq->sw_rx_prod++;
  891. /* Move the partial skb from cons to the pool (don't unmap yet);
  892. * save the mapping in case we drop the packet later on.
  893. */
  894. tpa_info->start_buf = *sw_rx_data_cons;
  895. mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
  896. le32_to_cpu(rx_bd_cons->addr.lo));
  897. tpa_info->start_buf_mapping = mapping;
  898. rxq->sw_rx_cons++;
  899. /* Set the TPA state to 'start' only if we are able to allocate an skb
  900. * for this aggregation; otherwise mark it as an error and the
  901. * aggregation will be dropped.
  902. */
  903. tpa_info->skb = netdev_alloc_skb(edev->ndev,
  904. le16_to_cpu(cqe->len_on_first_bd));
  905. if (unlikely(!tpa_info->skb)) {
  906. DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
  907. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  908. goto cons_buf;
  909. }
  910. skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
  911. memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
  912. /* Start filling in the aggregation info */
  913. tpa_info->frag_id = 0;
  914. tpa_info->agg_state = QEDE_AGG_STATE_START;
  915. rxhash = qede_get_rxhash(edev, cqe->bitfields,
  916. cqe->rss_hash, &rxhash_type);
  917. skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
  918. if ((le16_to_cpu(cqe->pars_flags.flags) >>
  919. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
  920. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
  921. tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
  922. else
  923. tpa_info->vlan_tag = 0;
  924. /* This is needed in order to enable forwarding support */
  925. qede_set_gro_params(edev, tpa_info->skb, cqe);
  926. cons_buf: /* We still need to handle bd_len_list to consume buffers */
  927. if (likely(cqe->ext_bd_len_list[0]))
  928. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  929. le16_to_cpu(cqe->ext_bd_len_list[0]));
  930. if (unlikely(cqe->ext_bd_len_list[1])) {
  931. DP_ERR(edev,
  932. "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
  933. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  934. }
  935. }
  936. #ifdef CONFIG_INET
  937. static void qede_gro_ip_csum(struct sk_buff *skb)
  938. {
  939. const struct iphdr *iph = ip_hdr(skb);
  940. struct tcphdr *th;
  941. skb_set_transport_header(skb, sizeof(struct iphdr));
  942. th = tcp_hdr(skb);
  943. th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
  944. iph->saddr, iph->daddr, 0);
  945. tcp_gro_complete(skb);
  946. }
  947. static void qede_gro_ipv6_csum(struct sk_buff *skb)
  948. {
  949. struct ipv6hdr *iph = ipv6_hdr(skb);
  950. struct tcphdr *th;
  951. skb_set_transport_header(skb, sizeof(struct ipv6hdr));
  952. th = tcp_hdr(skb);
  953. th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
  954. &iph->saddr, &iph->daddr, 0);
  955. tcp_gro_complete(skb);
  956. }
  957. #endif
  958. static void qede_gro_receive(struct qede_dev *edev,
  959. struct qede_fastpath *fp,
  960. struct sk_buff *skb,
  961. u16 vlan_tag)
  962. {
  963. /* FW can send a single MTU-sized packet from the GRO flow
  964. * due to aggregation timeout/last segment etc., which
  965. * is not expected to be a GRO packet. If the skb has zero
  966. * frags then simply push it up the stack as a non-GSO skb.
  967. */
  968. if (unlikely(!skb->data_len)) {
  969. skb_shinfo(skb)->gso_type = 0;
  970. skb_shinfo(skb)->gso_size = 0;
  971. goto send_skb;
  972. }
  973. #ifdef CONFIG_INET
  974. if (skb_shinfo(skb)->gso_size) {
  975. skb_set_network_header(skb, 0);
  976. switch (skb->protocol) {
  977. case htons(ETH_P_IP):
  978. qede_gro_ip_csum(skb);
  979. break;
  980. case htons(ETH_P_IPV6):
  981. qede_gro_ipv6_csum(skb);
  982. break;
  983. default:
  984. DP_ERR(edev,
  985. "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
  986. ntohs(skb->protocol));
  987. }
  988. }
  989. #endif
  990. send_skb:
  991. skb_record_rx_queue(skb, fp->rxq->rxq_id);
  992. qede_skb_receive(edev, fp, skb, vlan_tag);
  993. }
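/* TPA continuation CQE: each non-zero len_list entry adds one more RX buffer
 * as a fragment of the aggregated skb.
 */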
  994. static inline void qede_tpa_cont(struct qede_dev *edev,
  995. struct qede_rx_queue *rxq,
  996. struct eth_fast_path_rx_tpa_cont_cqe *cqe)
  997. {
  998. int i;
  999. for (i = 0; cqe->len_list[i]; i++)
  1000. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  1001. le16_to_cpu(cqe->len_list[i]));
  1002. if (unlikely(i > 1))
  1003. DP_ERR(edev,
  1004. "Strange - TPA cont with more than a single len_list entry\n");
  1005. }
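/* TPA end CQE: attach any remaining fragments, sanity-check the BD count and
 * total length against the CQE, copy the first-BD data (headers) out of the
 * still-mapped start buffer into the skb's linear area, finalize the skb and
 * hand it to the GRO path; in both the success and error paths the start
 * buffer is recycled as the next replacement buffer.
 */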
  1006. static void qede_tpa_end(struct qede_dev *edev,
  1007. struct qede_fastpath *fp,
  1008. struct eth_fast_path_rx_tpa_end_cqe *cqe)
  1009. {
  1010. struct qede_rx_queue *rxq = fp->rxq;
  1011. struct qede_agg_info *tpa_info;
  1012. struct sk_buff *skb;
  1013. int i;
  1014. tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  1015. skb = tpa_info->skb;
  1016. for (i = 0; cqe->len_list[i]; i++)
  1017. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  1018. le16_to_cpu(cqe->len_list[i]));
  1019. if (unlikely(i > 1))
  1020. DP_ERR(edev,
  1021. "Strange - TPA emd with more than a single len_list entry\n");
  1022. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  1023. goto err;
  1024. /* Sanity */
  1025. if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
  1026. DP_ERR(edev,
  1027. "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
  1028. cqe->num_of_bds, tpa_info->frag_id);
  1029. if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
  1030. DP_ERR(edev,
  1031. "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
  1032. le16_to_cpu(cqe->total_packet_len), skb->len);
  1033. memcpy(skb->data,
  1034. page_address(tpa_info->start_buf.data) +
  1035. tpa_info->start_cqe.placement_offset +
  1036. tpa_info->start_buf.page_offset,
  1037. le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
  1038. /* Recycle [mapped] start buffer for the next replacement */
  1039. tpa_info->replace_buf = tpa_info->start_buf;
  1040. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  1041. /* Finalize the SKB */
  1042. skb->protocol = eth_type_trans(skb, edev->ndev);
  1043. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1044. /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
  1045. * to skb_shinfo(skb)->gso_segs
  1046. */
  1047. NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
  1048. qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
  1049. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  1050. return;
  1051. err:
  1052. /* The BD starting the aggregation is still mapped; Re-use it for
  1053. * future aggregations [as replacement buffer]
  1054. */
  1055. memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
  1056. sizeof(struct sw_rx_data));
  1057. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  1058. tpa_info->start_buf.data = NULL;
  1059. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  1060. dev_kfree_skb_any(tpa_info->skb);
  1061. tpa_info->skb = NULL;
  1062. }
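/* RX checksum helpers: the parsing/error flags from the CQE are checked for
 * L4 (and, for tunnels, inner/outer) checksum validation and IP header
 * errors. The result is QEDE_CSUM_ERROR, or QEDE_CSUM_UNNECESSARY optionally
 * combined with QEDE_TUNN_CSUM_UNNECESSARY.
 */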
  1063. static bool qede_tunn_exist(u16 flag)
  1064. {
  1065. return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
  1066. PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
  1067. }
  1068. static u8 qede_check_tunn_csum(u16 flag)
  1069. {
  1070. u16 csum_flag = 0;
  1071. u8 tcsum = 0;
  1072. if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
  1073. PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
  1074. csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
  1075. PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
  1076. if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
  1077. PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
  1078. csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
  1079. PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
  1080. tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
  1081. }
  1082. csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
  1083. PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
  1084. PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
  1085. PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
  1086. if (csum_flag & flag)
  1087. return QEDE_CSUM_ERROR;
  1088. return QEDE_CSUM_UNNECESSARY | tcsum;
  1089. }
  1090. static u8 qede_check_notunn_csum(u16 flag)
  1091. {
  1092. u16 csum_flag = 0;
  1093. u8 csum = 0;
  1094. if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
  1095. PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
  1096. csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
  1097. PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
  1098. csum = QEDE_CSUM_UNNECESSARY;
  1099. }
  1100. csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
  1101. PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
  1102. if (csum_flag & flag)
  1103. return QEDE_CSUM_ERROR;
  1104. return csum;
  1105. }
  1106. static u8 qede_check_csum(u16 flag)
  1107. {
  1108. if (!qede_tunn_exist(flag))
  1109. return qede_check_notunn_csum(flag);
  1110. else
  1111. return qede_check_tunn_csum(flag);
  1112. }
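/* Illustrative sketch, not part of qede_main.c: the checksum helpers above
 * all test the CQE parse-flags word with the firmware-interface idiom
 * (FIELD_MASK << FIELD_SHIFT). The standalone program below models only that
 * decision; the DEMO_* masks and shifts are made-up values, not the real
 * PARSING_AND_ERR_FLAGS_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_L4_CALCULATED_MASK  0x1
#define DEMO_L4_CALCULATED_SHIFT 3
#define DEMO_L4_ERROR_MASK       0x1
#define DEMO_L4_ERROR_SHIFT      4
#define DEMO_IP_ERROR_MASK       0x1
#define DEMO_IP_ERROR_SHIFT      5

/* Returns 1 when the frame could be marked CHECKSUM_UNNECESSARY, 0 on error */
static int demo_check_csum(uint16_t flag)
{
        uint16_t csum_flag = 0;

        /* L4 errors only matter if HW actually validated the L4 checksum */
        if (flag & (DEMO_L4_CALCULATED_MASK << DEMO_L4_CALCULATED_SHIFT))
                csum_flag |= DEMO_L4_ERROR_MASK << DEMO_L4_ERROR_SHIFT;

        /* IP header errors always matter */
        csum_flag |= DEMO_IP_ERROR_MASK << DEMO_IP_ERROR_SHIFT;

        return (csum_flag & flag) ? 0 : 1;
}

int main(void)
{
        uint16_t good = DEMO_L4_CALCULATED_MASK << DEMO_L4_CALCULATED_SHIFT;
        uint16_t bad = good | (DEMO_L4_ERROR_MASK << DEMO_L4_ERROR_SHIFT);

        printf("good frame ok=%d, bad frame ok=%d\n",
               demo_check_csum(good), demo_check_csum(bad));
        return 0;
}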
  1113. static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
  1114. u16 flag)
  1115. {
  1116. u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
  1117. if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
  1118. ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
  1119. (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
  1120. PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
  1121. return true;
  1122. return false;
  1123. }
  1124. static int qede_rx_int(struct qede_fastpath *fp, int budget)
  1125. {
  1126. struct qede_dev *edev = fp->edev;
  1127. struct qede_rx_queue *rxq = fp->rxq;
  1128. u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
  1129. int rx_pkt = 0;
  1130. u8 csum_flag;
  1131. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  1132. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1133. /* Memory barrier to prevent the CPU from speculatively reading the CQE
1134. * / BD in the while-loop below before reading hw_comp_cons. Otherwise, if
1135. * the CQE is read before FW writes it, and FW then writes the CQE and SB
1136. * before the CPU reads hw_comp_cons, the CPU would process a stale CQE.
1137. */
  1138. rmb();
  1139. /* Loop to complete all indicated BDs */
  1140. while (sw_comp_cons != hw_comp_cons) {
  1141. struct eth_fast_path_rx_reg_cqe *fp_cqe;
  1142. enum pkt_hash_types rxhash_type;
  1143. enum eth_rx_cqe_type cqe_type;
  1144. struct sw_rx_data *sw_rx_data;
  1145. union eth_rx_cqe *cqe;
  1146. struct sk_buff *skb;
  1147. struct page *data;
  1148. __le16 flags;
  1149. u16 len, pad;
  1150. u32 rx_hash;
  1151. /* Get the CQE from the completion ring */
  1152. cqe = (union eth_rx_cqe *)
  1153. qed_chain_consume(&rxq->rx_comp_ring);
  1154. cqe_type = cqe->fast_path_regular.type;
  1155. if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
  1156. edev->ops->eth_cqe_completion(
  1157. edev->cdev, fp->id,
  1158. (struct eth_slow_path_rx_cqe *)cqe);
  1159. goto next_cqe;
  1160. }
  1161. if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
  1162. switch (cqe_type) {
  1163. case ETH_RX_CQE_TYPE_TPA_START:
  1164. qede_tpa_start(edev, rxq,
  1165. &cqe->fast_path_tpa_start);
  1166. goto next_cqe;
  1167. case ETH_RX_CQE_TYPE_TPA_CONT:
  1168. qede_tpa_cont(edev, rxq,
  1169. &cqe->fast_path_tpa_cont);
  1170. goto next_cqe;
  1171. case ETH_RX_CQE_TYPE_TPA_END:
  1172. qede_tpa_end(edev, fp,
  1173. &cqe->fast_path_tpa_end);
  1174. goto next_rx_only;
  1175. default:
  1176. break;
  1177. }
  1178. }
  1179. /* Get the data from the SW ring */
  1180. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1181. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1182. data = sw_rx_data->data;
  1183. fp_cqe = &cqe->fast_path_regular;
  1184. len = le16_to_cpu(fp_cqe->len_on_first_bd);
  1185. pad = fp_cqe->placement_offset;
  1186. flags = cqe->fast_path_regular.pars_flags.flags;
  1187. /* If this is an error packet then drop it */
  1188. parse_flag = le16_to_cpu(flags);
  1189. csum_flag = qede_check_csum(parse_flag);
  1190. if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
  1191. if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
  1192. parse_flag)) {
  1193. rxq->rx_ip_frags++;
  1194. goto alloc_skb;
  1195. }
  1196. DP_NOTICE(edev,
  1197. "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
  1198. sw_comp_cons, parse_flag);
  1199. rxq->rx_hw_errors++;
  1200. qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
  1201. goto next_cqe;
  1202. }
  1203. alloc_skb:
  1204. skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
  1205. if (unlikely(!skb)) {
  1206. DP_NOTICE(edev,
  1207. "skb allocation failed, dropping incoming packet\n");
  1208. qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
  1209. rxq->rx_alloc_errors++;
  1210. goto next_cqe;
  1211. }
  1212. /* Copy data into SKB */
  1213. if (len + pad <= edev->rx_copybreak) {
  1214. memcpy(skb_put(skb, len),
  1215. page_address(data) + pad +
  1216. sw_rx_data->page_offset, len);
  1217. qede_reuse_page(edev, rxq, sw_rx_data);
  1218. } else {
  1219. struct skb_frag_struct *frag;
  1220. unsigned int pull_len;
  1221. unsigned char *va;
  1222. frag = &skb_shinfo(skb)->frags[0];
  1223. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
  1224. pad + sw_rx_data->page_offset,
  1225. len, rxq->rx_buf_seg_size);
  1226. va = skb_frag_address(frag);
  1227. pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
  1228. /* Align the pull_len to optimize memcpy */
  1229. memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
  1230. skb_frag_size_sub(frag, pull_len);
  1231. frag->page_offset += pull_len;
  1232. skb->data_len -= pull_len;
  1233. skb->tail += pull_len;
  1234. if (unlikely(qede_realloc_rx_buffer(edev, rxq,
  1235. sw_rx_data))) {
  1236. DP_ERR(edev, "Failed to allocate rx buffer\n");
  1237. /* Incr page ref count to reuse on allocation
  1238. * failure so that it doesn't get freed while
  1239. * freeing SKB.
  1240. */
  1241. page_ref_inc(sw_rx_data->data);
  1242. rxq->rx_alloc_errors++;
  1243. qede_recycle_rx_bd_ring(rxq, edev,
  1244. fp_cqe->bd_num);
  1245. dev_kfree_skb_any(skb);
  1246. goto next_cqe;
  1247. }
  1248. }
  1249. qede_rx_bd_ring_consume(rxq);
  1250. if (fp_cqe->bd_num != 1) {
  1251. u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
  1252. u8 num_frags;
  1253. pkt_len -= len;
  1254. for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
  1255. num_frags--) {
  1256. u16 cur_size = pkt_len > rxq->rx_buf_size ?
  1257. rxq->rx_buf_size : pkt_len;
  1258. if (unlikely(!cur_size)) {
  1259. DP_ERR(edev,
  1260. "Still got %d BDs for mapping jumbo, but length became 0\n",
  1261. num_frags);
  1262. qede_recycle_rx_bd_ring(rxq, edev,
  1263. num_frags);
  1264. dev_kfree_skb_any(skb);
  1265. goto next_cqe;
  1266. }
  1267. if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
  1268. qede_recycle_rx_bd_ring(rxq, edev,
  1269. num_frags);
  1270. dev_kfree_skb_any(skb);
  1271. goto next_cqe;
  1272. }
  1273. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1274. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1275. qede_rx_bd_ring_consume(rxq);
  1276. dma_unmap_page(&edev->pdev->dev,
  1277. sw_rx_data->mapping,
  1278. PAGE_SIZE, DMA_FROM_DEVICE);
  1279. skb_fill_page_desc(skb,
  1280. skb_shinfo(skb)->nr_frags++,
  1281. sw_rx_data->data, 0,
  1282. cur_size);
  1283. skb->truesize += PAGE_SIZE;
  1284. skb->data_len += cur_size;
  1285. skb->len += cur_size;
  1286. pkt_len -= cur_size;
  1287. }
  1288. if (unlikely(pkt_len))
  1289. DP_ERR(edev,
  1290. "Mapped all BDs of jumbo, but still have %d bytes\n",
  1291. pkt_len);
  1292. }
  1293. skb->protocol = eth_type_trans(skb, edev->ndev);
  1294. rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
  1295. fp_cqe->rss_hash, &rxhash_type);
  1296. skb_set_hash(skb, rx_hash, rxhash_type);
  1297. qede_set_skb_csum(skb, csum_flag);
  1298. skb_record_rx_queue(skb, fp->rxq->rxq_id);
  1299. qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
  1300. next_rx_only:
  1301. rx_pkt++;
  1302. next_cqe: /* don't consume bd rx buffer */
  1303. qed_chain_recycle_consumed(&rxq->rx_comp_ring);
  1304. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  1305. /* CR TPA - revisit how to handle budget in TPA perhaps
  1306. * increase on "end"
  1307. */
  1308. if (rx_pkt == budget)
  1309. break;
  1310. } /* repeat while sw_comp_cons != hw_comp_cons... */
  1311. /* Update producers */
  1312. qede_update_rx_prod(edev, rxq);
  1313. rxq->rcv_pkts += rx_pkt;
  1314. return rx_pkt;
  1315. }
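/* Illustrative sketch, not part of the driver: qede_rx_int() walks the
 * completion ring by comparing free-running 16-bit producer/consumer
 * counters and masking them into the software ring (sw_rx_cons &
 * NUM_RX_BDS_MAX). The toy ring below uses the same free-running-counter
 * plus power-of-two-mask idiom with a hypothetical 8-entry ring.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8                        /* must be a power of two */
#define DEMO_RING_MASK (DEMO_RING_SIZE - 1)

struct demo_ring {
        int slots[DEMO_RING_SIZE];
        uint16_t prod;                          /* free-running, wraps at 64K */
        uint16_t cons;
};

static void demo_produce(struct demo_ring *r, int val)
{
        r->slots[r->prod & DEMO_RING_MASK] = val;
        r->prod++;
}

static int demo_consume(struct demo_ring *r)
{
        int val = r->slots[r->cons & DEMO_RING_MASK];

        r->cons++;
        return val;
}

int main(void)
{
        struct demo_ring r = { .prod = 0xfffe, .cons = 0xfffe };

        demo_produce(&r, 1);
        demo_produce(&r, 2);                    /* prod wraps past 0xffff */
        while (r.cons != r.prod)                /* same loop condition as qede_rx_int */
                printf("consumed %d\n", demo_consume(&r));
        return 0;
}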
  1316. static int qede_poll(struct napi_struct *napi, int budget)
  1317. {
  1318. struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
  1319. napi);
  1320. struct qede_dev *edev = fp->edev;
  1321. int rx_work_done = 0;
  1322. u8 tc;
  1323. for (tc = 0; tc < edev->num_tc; tc++)
  1324. if (likely(fp->type & QEDE_FASTPATH_TX) &&
  1325. qede_txq_has_work(&fp->txqs[tc]))
  1326. qede_tx_int(edev, &fp->txqs[tc]);
  1327. rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
  1328. qede_has_rx_work(fp->rxq)) ?
  1329. qede_rx_int(fp, budget) : 0;
  1330. if (rx_work_done < budget) {
  1331. qed_sb_update_sb_idx(fp->sb_info);
1332. /* *_has_*_work() reads the status block, so we must make sure the
1333. * status block indices have actually been read
1334. * (qed_sb_update_sb_idx) before that check (*_has_*_work).
1335. * Otherwise we might acknowledge a "newer" status block value to
1336. * HW than the one we actually handled: if the device DMAs a new
1337. * index right after qede_has_rx_work() and there is no rmb, the
1338. * memory read in qed_sb_update_sb_idx() may be postponed until
1339. * right before *_ack_sb(). Should that happen, no further
1340. * interrupt will be generated until the status block is updated
1341. * again, even though there is still unhandled work - so the
1342. * barrier below closes the window between reading the indices
1343. * and re-checking for work.
1344. */
  1345. rmb();
  1346. /* Fall out from the NAPI loop if needed */
  1347. if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
  1348. qede_has_rx_work(fp->rxq)) ||
  1349. (likely(fp->type & QEDE_FASTPATH_TX) &&
  1350. qede_has_tx_work(fp)))) {
  1351. napi_complete(napi);
  1352. /* Update and reenable interrupts */
  1353. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
  1354. 1 /*update*/);
  1355. } else {
  1356. rx_work_done = budget;
  1357. }
  1358. }
  1359. return rx_work_done;
  1360. }
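/* Illustrative sketch, not part of the driver: the poll routine above follows
 * the usual NAPI contract - consume at most 'budget' packets, and only when
 * it finishes early (work < budget) may it stop polling and re-enable the
 * interrupt source. The toy loop below models that contract with a
 * hypothetical queue; all names and numbers here are made up.
 */
#include <stdio.h>

static int demo_queue_depth = 10;               /* pretend backlog */

static int demo_poll(int budget)
{
        int done = 0;

        while (done < budget && demo_queue_depth > 0) {
                demo_queue_depth--;             /* "handle" one packet */
                done++;
        }
        return done;
}

int main(void)
{
        int budget = 4, work;

        do {
                work = demo_poll(budget);
                if (work < budget)
                        printf("under budget (%d < %d): re-enable IRQs\n",
                               work, budget);
                else
                        printf("budget exhausted: stay in polling mode\n");
        } while (work == budget);
        return 0;
}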
  1361. static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
  1362. {
  1363. struct qede_fastpath *fp = fp_cookie;
  1364. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
  1365. napi_schedule_irqoff(&fp->napi);
  1366. return IRQ_HANDLED;
  1367. }
  1368. /* -------------------------------------------------------------------------
  1369. * END OF FAST-PATH
  1370. * -------------------------------------------------------------------------
  1371. */
  1372. static int qede_open(struct net_device *ndev);
  1373. static int qede_close(struct net_device *ndev);
  1374. static int qede_set_mac_addr(struct net_device *ndev, void *p);
  1375. static void qede_set_rx_mode(struct net_device *ndev);
  1376. static void qede_config_rx_mode(struct net_device *ndev);
  1377. static int qede_set_ucast_rx_mac(struct qede_dev *edev,
  1378. enum qed_filter_xcast_params_type opcode,
  1379. unsigned char mac[ETH_ALEN])
  1380. {
  1381. struct qed_filter_params filter_cmd;
  1382. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1383. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1384. filter_cmd.filter.ucast.type = opcode;
  1385. filter_cmd.filter.ucast.mac_valid = 1;
  1386. ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
  1387. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1388. }
  1389. static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
  1390. enum qed_filter_xcast_params_type opcode,
  1391. u16 vid)
  1392. {
  1393. struct qed_filter_params filter_cmd;
  1394. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1395. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1396. filter_cmd.filter.ucast.type = opcode;
  1397. filter_cmd.filter.ucast.vlan_valid = 1;
  1398. filter_cmd.filter.ucast.vlan = vid;
  1399. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1400. }
  1401. void qede_fill_by_demand_stats(struct qede_dev *edev)
  1402. {
  1403. struct qed_eth_stats stats;
  1404. edev->ops->get_vport_stats(edev->cdev, &stats);
  1405. edev->stats.no_buff_discards = stats.no_buff_discards;
  1406. edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
  1407. edev->stats.ttl0_discard = stats.ttl0_discard;
  1408. edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
  1409. edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
  1410. edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
  1411. edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
  1412. edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
  1413. edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
  1414. edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
  1415. edev->stats.mac_filter_discards = stats.mac_filter_discards;
  1416. edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
  1417. edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
  1418. edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
  1419. edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
  1420. edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
  1421. edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
  1422. edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
  1423. edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
  1424. edev->stats.coalesced_events = stats.tpa_coalesced_events;
  1425. edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
  1426. edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
  1427. edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
  1428. edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
  1429. edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
  1430. edev->stats.rx_128_to_255_byte_packets =
  1431. stats.rx_128_to_255_byte_packets;
  1432. edev->stats.rx_256_to_511_byte_packets =
  1433. stats.rx_256_to_511_byte_packets;
  1434. edev->stats.rx_512_to_1023_byte_packets =
  1435. stats.rx_512_to_1023_byte_packets;
  1436. edev->stats.rx_1024_to_1518_byte_packets =
  1437. stats.rx_1024_to_1518_byte_packets;
  1438. edev->stats.rx_1519_to_1522_byte_packets =
  1439. stats.rx_1519_to_1522_byte_packets;
  1440. edev->stats.rx_1519_to_2047_byte_packets =
  1441. stats.rx_1519_to_2047_byte_packets;
  1442. edev->stats.rx_2048_to_4095_byte_packets =
  1443. stats.rx_2048_to_4095_byte_packets;
  1444. edev->stats.rx_4096_to_9216_byte_packets =
  1445. stats.rx_4096_to_9216_byte_packets;
  1446. edev->stats.rx_9217_to_16383_byte_packets =
  1447. stats.rx_9217_to_16383_byte_packets;
  1448. edev->stats.rx_crc_errors = stats.rx_crc_errors;
  1449. edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
  1450. edev->stats.rx_pause_frames = stats.rx_pause_frames;
  1451. edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
  1452. edev->stats.rx_align_errors = stats.rx_align_errors;
  1453. edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
  1454. edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
  1455. edev->stats.rx_jabbers = stats.rx_jabbers;
  1456. edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
  1457. edev->stats.rx_fragments = stats.rx_fragments;
  1458. edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
  1459. edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
  1460. edev->stats.tx_128_to_255_byte_packets =
  1461. stats.tx_128_to_255_byte_packets;
  1462. edev->stats.tx_256_to_511_byte_packets =
  1463. stats.tx_256_to_511_byte_packets;
  1464. edev->stats.tx_512_to_1023_byte_packets =
  1465. stats.tx_512_to_1023_byte_packets;
  1466. edev->stats.tx_1024_to_1518_byte_packets =
  1467. stats.tx_1024_to_1518_byte_packets;
  1468. edev->stats.tx_1519_to_2047_byte_packets =
  1469. stats.tx_1519_to_2047_byte_packets;
  1470. edev->stats.tx_2048_to_4095_byte_packets =
  1471. stats.tx_2048_to_4095_byte_packets;
  1472. edev->stats.tx_4096_to_9216_byte_packets =
  1473. stats.tx_4096_to_9216_byte_packets;
  1474. edev->stats.tx_9217_to_16383_byte_packets =
  1475. stats.tx_9217_to_16383_byte_packets;
  1476. edev->stats.tx_pause_frames = stats.tx_pause_frames;
  1477. edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
  1478. edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
  1479. edev->stats.tx_total_collisions = stats.tx_total_collisions;
  1480. edev->stats.brb_truncates = stats.brb_truncates;
  1481. edev->stats.brb_discards = stats.brb_discards;
  1482. edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
  1483. }
  1484. static
  1485. struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
  1486. struct rtnl_link_stats64 *stats)
  1487. {
  1488. struct qede_dev *edev = netdev_priv(dev);
  1489. qede_fill_by_demand_stats(edev);
  1490. stats->rx_packets = edev->stats.rx_ucast_pkts +
  1491. edev->stats.rx_mcast_pkts +
  1492. edev->stats.rx_bcast_pkts;
  1493. stats->tx_packets = edev->stats.tx_ucast_pkts +
  1494. edev->stats.tx_mcast_pkts +
  1495. edev->stats.tx_bcast_pkts;
  1496. stats->rx_bytes = edev->stats.rx_ucast_bytes +
  1497. edev->stats.rx_mcast_bytes +
  1498. edev->stats.rx_bcast_bytes;
  1499. stats->tx_bytes = edev->stats.tx_ucast_bytes +
  1500. edev->stats.tx_mcast_bytes +
  1501. edev->stats.tx_bcast_bytes;
  1502. stats->tx_errors = edev->stats.tx_err_drop_pkts;
  1503. stats->multicast = edev->stats.rx_mcast_pkts +
  1504. edev->stats.rx_bcast_pkts;
  1505. stats->rx_fifo_errors = edev->stats.no_buff_discards;
  1506. stats->collisions = edev->stats.tx_total_collisions;
  1507. stats->rx_crc_errors = edev->stats.rx_crc_errors;
  1508. stats->rx_frame_errors = edev->stats.rx_align_errors;
  1509. return stats;
  1510. }
  1511. #ifdef CONFIG_QED_SRIOV
  1512. static int qede_get_vf_config(struct net_device *dev, int vfidx,
  1513. struct ifla_vf_info *ivi)
  1514. {
  1515. struct qede_dev *edev = netdev_priv(dev);
  1516. if (!edev->ops)
  1517. return -EINVAL;
  1518. return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
  1519. }
  1520. static int qede_set_vf_rate(struct net_device *dev, int vfidx,
  1521. int min_tx_rate, int max_tx_rate)
  1522. {
  1523. struct qede_dev *edev = netdev_priv(dev);
  1524. return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
  1525. max_tx_rate);
  1526. }
  1527. static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
  1528. {
  1529. struct qede_dev *edev = netdev_priv(dev);
  1530. if (!edev->ops)
  1531. return -EINVAL;
  1532. return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
  1533. }
  1534. static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
  1535. int link_state)
  1536. {
  1537. struct qede_dev *edev = netdev_priv(dev);
  1538. if (!edev->ops)
  1539. return -EINVAL;
  1540. return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
  1541. }
  1542. #endif
  1543. static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
  1544. {
  1545. struct qed_update_vport_params params;
  1546. int rc;
  1547. /* Proceed only if action actually needs to be performed */
  1548. if (edev->accept_any_vlan == action)
  1549. return;
  1550. memset(&params, 0, sizeof(params));
  1551. params.vport_id = 0;
  1552. params.accept_any_vlan = action;
  1553. params.update_accept_any_vlan_flg = 1;
  1554. rc = edev->ops->vport_update(edev->cdev, &params);
  1555. if (rc) {
  1556. DP_ERR(edev, "Failed to %s accept-any-vlan\n",
  1557. action ? "enable" : "disable");
  1558. } else {
  1559. DP_INFO(edev, "%s accept-any-vlan\n",
  1560. action ? "enabled" : "disabled");
  1561. edev->accept_any_vlan = action;
  1562. }
  1563. }
  1564. static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1565. {
  1566. struct qede_dev *edev = netdev_priv(dev);
  1567. struct qede_vlan *vlan, *tmp;
  1568. int rc;
  1569. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
  1570. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1571. if (!vlan) {
  1572. DP_INFO(edev, "Failed to allocate struct for vlan\n");
  1573. return -ENOMEM;
  1574. }
  1575. INIT_LIST_HEAD(&vlan->list);
  1576. vlan->vid = vid;
  1577. vlan->configured = false;
  1578. /* Verify vlan isn't already configured */
  1579. list_for_each_entry(tmp, &edev->vlan_list, list) {
  1580. if (tmp->vid == vlan->vid) {
  1581. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1582. "vlan already configured\n");
  1583. kfree(vlan);
  1584. return -EEXIST;
  1585. }
  1586. }
  1587. /* If interface is down, cache this VLAN ID and return */
  1588. if (edev->state != QEDE_STATE_OPEN) {
  1589. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1590. "Interface is down, VLAN %d will be configured when interface is up\n",
  1591. vid);
  1592. if (vid != 0)
  1593. edev->non_configured_vlans++;
  1594. list_add(&vlan->list, &edev->vlan_list);
  1595. return 0;
  1596. }
  1597. /* Check for the filter limit.
  1598. * Note - vlan0 has a reserved filter and can be added without
  1599. * worrying about quota
  1600. */
  1601. if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
  1602. (vlan->vid == 0)) {
  1603. rc = qede_set_ucast_rx_vlan(edev,
  1604. QED_FILTER_XCAST_TYPE_ADD,
  1605. vlan->vid);
  1606. if (rc) {
  1607. DP_ERR(edev, "Failed to configure VLAN %d\n",
  1608. vlan->vid);
  1609. kfree(vlan);
  1610. return -EINVAL;
  1611. }
  1612. vlan->configured = true;
  1613. /* vlan0 filter isn't consuming out of our quota */
  1614. if (vlan->vid != 0)
  1615. edev->configured_vlans++;
  1616. } else {
  1617. /* Out of quota; Activate accept-any-VLAN mode */
  1618. if (!edev->non_configured_vlans)
  1619. qede_config_accept_any_vlan(edev, true);
  1620. edev->non_configured_vlans++;
  1621. }
  1622. list_add(&vlan->list, &edev->vlan_list);
  1623. return 0;
  1624. }
  1625. static void qede_del_vlan_from_list(struct qede_dev *edev,
  1626. struct qede_vlan *vlan)
  1627. {
  1628. /* vlan0 filter isn't consuming out of our quota */
  1629. if (vlan->vid != 0) {
  1630. if (vlan->configured)
  1631. edev->configured_vlans--;
  1632. else
  1633. edev->non_configured_vlans--;
  1634. }
  1635. list_del(&vlan->list);
  1636. kfree(vlan);
  1637. }
  1638. static int qede_configure_vlan_filters(struct qede_dev *edev)
  1639. {
  1640. int rc = 0, real_rc = 0, accept_any_vlan = 0;
  1641. struct qed_dev_eth_info *dev_info;
  1642. struct qede_vlan *vlan = NULL;
  1643. if (list_empty(&edev->vlan_list))
  1644. return 0;
  1645. dev_info = &edev->dev_info;
  1646. /* Configure non-configured vlans */
  1647. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1648. if (vlan->configured)
  1649. continue;
  1650. /* We have used all our credits, now enable accept_any_vlan */
  1651. if ((vlan->vid != 0) &&
  1652. (edev->configured_vlans == dev_info->num_vlan_filters)) {
  1653. accept_any_vlan = 1;
  1654. continue;
  1655. }
  1656. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
  1657. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
  1658. vlan->vid);
  1659. if (rc) {
  1660. DP_ERR(edev, "Failed to configure VLAN %u\n",
  1661. vlan->vid);
  1662. real_rc = rc;
  1663. continue;
  1664. }
  1665. vlan->configured = true;
  1666. /* vlan0 filter doesn't consume our VLAN filter's quota */
  1667. if (vlan->vid != 0) {
  1668. edev->non_configured_vlans--;
  1669. edev->configured_vlans++;
  1670. }
  1671. }
  1672. /* enable accept_any_vlan mode if we have more VLANs than credits,
  1673. * or remove accept_any_vlan mode if we've actually removed
  1674. * a non-configured vlan, and all remaining vlans are truly configured.
  1675. */
  1676. if (accept_any_vlan)
  1677. qede_config_accept_any_vlan(edev, true);
  1678. else if (!edev->non_configured_vlans)
  1679. qede_config_accept_any_vlan(edev, false);
  1680. return real_rc;
  1681. }
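/* Illustrative sketch, not part of qede_main.c: the VLAN code above keeps a
 * hard-filter quota and switches to accept-any-VLAN mode once the quota is
 * exhausted, with vlan 0 never consuming a credit. The toy model below keeps
 * only that bookkeeping; the structure and the quota of 2 are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_NUM_VLAN_FILTERS 2                 /* hypothetical HW quota */

struct demo_vlan_state {
        int configured;                         /* VLANs holding a HW filter */
        int non_configured;                     /* VLANs without a HW filter */
        bool accept_any;
};

static void demo_add_vlan(struct demo_vlan_state *s, unsigned int vid)
{
        if (vid == 0 || s->configured < DEMO_NUM_VLAN_FILTERS) {
                if (vid != 0)                   /* vlan0 has a reserved filter */
                        s->configured++;
                return;
        }

        /* Out of quota: the first overflow turns on accept-any-VLAN */
        if (!s->non_configured)
                s->accept_any = true;
        s->non_configured++;
}

int main(void)
{
        struct demo_vlan_state s = { 0 };
        unsigned int vids[] = { 0, 10, 20, 30 };

        for (unsigned int i = 0; i < sizeof(vids) / sizeof(vids[0]); i++) {
                demo_add_vlan(&s, vids[i]);
                printf("vid %u: configured=%d pending=%d accept_any=%d\n",
                       vids[i], s.configured, s.non_configured, s.accept_any);
        }
        return 0;
}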
  1682. static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
  1683. {
  1684. struct qede_dev *edev = netdev_priv(dev);
  1685. struct qede_vlan *vlan = NULL;
  1686. int rc;
  1687. DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
  1688. /* Find whether entry exists */
  1689. list_for_each_entry(vlan, &edev->vlan_list, list)
  1690. if (vlan->vid == vid)
  1691. break;
  1692. if (!vlan || (vlan->vid != vid)) {
  1693. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1694. "Vlan isn't configured\n");
  1695. return 0;
  1696. }
  1697. if (edev->state != QEDE_STATE_OPEN) {
1698. /* As the interface is already down, there is no VPORT instance
1699. * to remove the vlan filter from; just update the vlan list.
1700. */
  1701. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1702. "Interface is down, removing VLAN from list only\n");
  1703. qede_del_vlan_from_list(edev, vlan);
  1704. return 0;
  1705. }
  1706. /* Remove vlan */
  1707. if (vlan->configured) {
  1708. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
  1709. vid);
  1710. if (rc) {
  1711. DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
  1712. return -EINVAL;
  1713. }
  1714. }
  1715. qede_del_vlan_from_list(edev, vlan);
  1716. /* We have removed a VLAN - try to see if we can
  1717. * configure non-configured VLAN from the list.
  1718. */
  1719. rc = qede_configure_vlan_filters(edev);
  1720. return rc;
  1721. }
  1722. static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
  1723. {
  1724. struct qede_vlan *vlan = NULL;
  1725. if (list_empty(&edev->vlan_list))
  1726. return;
  1727. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1728. if (!vlan->configured)
  1729. continue;
  1730. vlan->configured = false;
  1731. /* vlan0 filter isn't consuming out of our quota */
  1732. if (vlan->vid != 0) {
  1733. edev->non_configured_vlans++;
  1734. edev->configured_vlans--;
  1735. }
  1736. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1737. "marked vlan %d as non-configured\n", vlan->vid);
  1738. }
  1739. edev->accept_any_vlan = false;
  1740. }
  1741. static int qede_set_features(struct net_device *dev, netdev_features_t features)
  1742. {
  1743. struct qede_dev *edev = netdev_priv(dev);
  1744. netdev_features_t changes = features ^ dev->features;
  1745. bool need_reload = false;
  1746. /* No action needed if hardware GRO is disabled during driver load */
  1747. if (changes & NETIF_F_GRO) {
  1748. if (dev->features & NETIF_F_GRO)
  1749. need_reload = !edev->gro_disable;
  1750. else
  1751. need_reload = edev->gro_disable;
  1752. }
  1753. if (need_reload && netif_running(edev->ndev)) {
  1754. dev->features = features;
  1755. qede_reload(edev, NULL, NULL);
  1756. return 1;
  1757. }
  1758. return 0;
  1759. }
  1760. static void qede_udp_tunnel_add(struct net_device *dev,
  1761. struct udp_tunnel_info *ti)
  1762. {
  1763. struct qede_dev *edev = netdev_priv(dev);
  1764. u16 t_port = ntohs(ti->port);
  1765. switch (ti->type) {
  1766. case UDP_TUNNEL_TYPE_VXLAN:
  1767. if (edev->vxlan_dst_port)
  1768. return;
  1769. edev->vxlan_dst_port = t_port;
  1770. DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
  1771. t_port);
  1772. set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
  1773. break;
  1774. case UDP_TUNNEL_TYPE_GENEVE:
  1775. if (edev->geneve_dst_port)
  1776. return;
  1777. edev->geneve_dst_port = t_port;
  1778. DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
  1779. t_port);
  1780. set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
  1781. break;
  1782. default:
  1783. return;
  1784. }
  1785. schedule_delayed_work(&edev->sp_task, 0);
  1786. }
  1787. static void qede_udp_tunnel_del(struct net_device *dev,
  1788. struct udp_tunnel_info *ti)
  1789. {
  1790. struct qede_dev *edev = netdev_priv(dev);
  1791. u16 t_port = ntohs(ti->port);
  1792. switch (ti->type) {
  1793. case UDP_TUNNEL_TYPE_VXLAN:
  1794. if (t_port != edev->vxlan_dst_port)
  1795. return;
  1796. edev->vxlan_dst_port = 0;
  1797. DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
  1798. t_port);
  1799. set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
  1800. break;
  1801. case UDP_TUNNEL_TYPE_GENEVE:
  1802. if (t_port != edev->geneve_dst_port)
  1803. return;
  1804. edev->geneve_dst_port = 0;
  1805. DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
  1806. t_port);
  1807. set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
  1808. break;
  1809. default:
  1810. return;
  1811. }
  1812. schedule_delayed_work(&edev->sp_task, 0);
  1813. }
  1814. static const struct net_device_ops qede_netdev_ops = {
  1815. .ndo_open = qede_open,
  1816. .ndo_stop = qede_close,
  1817. .ndo_start_xmit = qede_start_xmit,
  1818. .ndo_set_rx_mode = qede_set_rx_mode,
  1819. .ndo_set_mac_address = qede_set_mac_addr,
  1820. .ndo_validate_addr = eth_validate_addr,
  1821. .ndo_change_mtu = qede_change_mtu,
  1822. #ifdef CONFIG_QED_SRIOV
  1823. .ndo_set_vf_mac = qede_set_vf_mac,
  1824. .ndo_set_vf_vlan = qede_set_vf_vlan,
  1825. #endif
  1826. .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
  1827. .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
  1828. .ndo_set_features = qede_set_features,
  1829. .ndo_get_stats64 = qede_get_stats64,
  1830. #ifdef CONFIG_QED_SRIOV
  1831. .ndo_set_vf_link_state = qede_set_vf_link_state,
  1832. .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
  1833. .ndo_get_vf_config = qede_get_vf_config,
  1834. .ndo_set_vf_rate = qede_set_vf_rate,
  1835. #endif
  1836. .ndo_udp_tunnel_add = qede_udp_tunnel_add,
  1837. .ndo_udp_tunnel_del = qede_udp_tunnel_del,
  1838. };
  1839. /* -------------------------------------------------------------------------
  1840. * START OF PROBE / REMOVE
  1841. * -------------------------------------------------------------------------
  1842. */
  1843. static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
  1844. struct pci_dev *pdev,
  1845. struct qed_dev_eth_info *info,
  1846. u32 dp_module, u8 dp_level)
  1847. {
  1848. struct net_device *ndev;
  1849. struct qede_dev *edev;
  1850. ndev = alloc_etherdev_mqs(sizeof(*edev),
  1851. info->num_queues, info->num_queues);
  1852. if (!ndev) {
  1853. pr_err("etherdev allocation failed\n");
  1854. return NULL;
  1855. }
  1856. edev = netdev_priv(ndev);
  1857. edev->ndev = ndev;
  1858. edev->cdev = cdev;
  1859. edev->pdev = pdev;
  1860. edev->dp_module = dp_module;
  1861. edev->dp_level = dp_level;
  1862. edev->ops = qed_ops;
  1863. edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
  1864. edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
  1865. DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
  1866. info->num_queues, info->num_queues);
  1867. SET_NETDEV_DEV(ndev, &pdev->dev);
  1868. memset(&edev->stats, 0, sizeof(edev->stats));
  1869. memcpy(&edev->dev_info, info, sizeof(*info));
  1870. edev->num_tc = edev->dev_info.num_tc;
  1871. INIT_LIST_HEAD(&edev->vlan_list);
  1872. return edev;
  1873. }
  1874. static void qede_init_ndev(struct qede_dev *edev)
  1875. {
  1876. struct net_device *ndev = edev->ndev;
  1877. struct pci_dev *pdev = edev->pdev;
  1878. u32 hw_features;
  1879. pci_set_drvdata(pdev, ndev);
  1880. ndev->mem_start = edev->dev_info.common.pci_mem_start;
  1881. ndev->base_addr = ndev->mem_start;
  1882. ndev->mem_end = edev->dev_info.common.pci_mem_end;
  1883. ndev->irq = edev->dev_info.common.pci_irq;
  1884. ndev->watchdog_timeo = TX_TIMEOUT;
  1885. ndev->netdev_ops = &qede_netdev_ops;
  1886. qede_set_ethtool_ops(ndev);
1887. /* user-changeable features */
  1888. hw_features = NETIF_F_GRO | NETIF_F_SG |
  1889. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1890. NETIF_F_TSO | NETIF_F_TSO6;
1891. /* Encap features */
  1892. hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
  1893. NETIF_F_TSO_ECN;
  1894. ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1895. NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
  1896. NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1897. NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
  1898. ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1899. NETIF_F_HIGHDMA;
  1900. ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1901. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
  1902. NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
  1903. ndev->hw_features = hw_features;
  1904. /* Set network device HW mac */
  1905. ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
  1906. }
1907. /* This function converts the 32b 'debug' module parameter into separate level and module values.
  1908. * Input 32b decoding:
  1909. * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
  1910. * 'happy' flow, e.g. memory allocation failed.
  1911. * b30 - enable all INFO prints. INFO prints are for major steps in the flow
  1912. * and provide important parameters.
  1913. * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
  1914. * module. VERBOSE prints are for tracking the specific flow in low level.
  1915. *
  1916. * Notice that the level should be that of the lowest required logs.
  1917. */
  1918. void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
  1919. {
  1920. *p_dp_level = QED_LEVEL_NOTICE;
  1921. *p_dp_module = 0;
  1922. if (debug & QED_LOG_VERBOSE_MASK) {
  1923. *p_dp_level = QED_LEVEL_VERBOSE;
  1924. *p_dp_module = (debug & 0x3FFFFFFF);
  1925. } else if (debug & QED_LOG_INFO_MASK) {
  1926. *p_dp_level = QED_LEVEL_INFO;
  1927. } else if (debug & QED_LOG_NOTICE_MASK) {
  1928. *p_dp_level = QED_LEVEL_NOTICE;
  1929. }
  1930. }
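/* Illustrative sketch, not part of the driver: worked example of the
 * debug-parameter decoding described above. The DEMO_* mask values are
 * inferred from the bit layout in the comment (b31 notice, b30 info,
 * b29-b0 verbose module bits) and are assumptions for this standalone demo,
 * not the real QED_LOG_*_MASK definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOG_VERBOSE_MASK 0x3fffffffU       /* b29-b0 */
#define DEMO_LOG_INFO_MASK    0x40000000U       /* b30 */
#define DEMO_LOG_NOTICE_MASK  0x80000000U       /* b31 */

enum demo_level { DEMO_NOTICE, DEMO_INFO, DEMO_VERBOSE };

static void demo_config_debug(uint32_t debug, uint32_t *module, int *level)
{
        *level = DEMO_NOTICE;                   /* default, as in the driver */
        *module = 0;

        if (debug & DEMO_LOG_VERBOSE_MASK) {
                *level = DEMO_VERBOSE;
                *module = debug & DEMO_LOG_VERBOSE_MASK;
        } else if (debug & DEMO_LOG_INFO_MASK) {
                *level = DEMO_INFO;
        }
}

int main(void)
{
        uint32_t module;
        int level;

        /* b31 set (notice) plus module bits 0 and 2 set -> verbose wins */
        demo_config_debug(0x80000005U, &module, &level);
        printf("level=%d module=0x%08x\n", level, module);
        return 0;
}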
  1931. static void qede_free_fp_array(struct qede_dev *edev)
  1932. {
  1933. if (edev->fp_array) {
  1934. struct qede_fastpath *fp;
  1935. int i;
  1936. for_each_queue(i) {
  1937. fp = &edev->fp_array[i];
  1938. kfree(fp->sb_info);
  1939. kfree(fp->rxq);
  1940. kfree(fp->txqs);
  1941. }
  1942. kfree(edev->fp_array);
  1943. }
  1944. edev->num_queues = 0;
  1945. edev->fp_num_tx = 0;
  1946. edev->fp_num_rx = 0;
  1947. }
  1948. static int qede_alloc_fp_array(struct qede_dev *edev)
  1949. {
  1950. u8 fp_combined, fp_rx = edev->fp_num_rx;
  1951. struct qede_fastpath *fp;
  1952. int i;
  1953. edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
  1954. sizeof(*edev->fp_array), GFP_KERNEL);
  1955. if (!edev->fp_array) {
  1956. DP_NOTICE(edev, "fp array allocation failed\n");
  1957. goto err;
  1958. }
  1959. fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
  1960. /* Allocate the FP elements for Rx queues followed by combined and then
  1961. * the Tx. This ordering should be maintained so that the respective
  1962. * queues (Rx or Tx) will be together in the fastpath array and the
  1963. * associated ids will be sequential.
  1964. */
  1965. for_each_queue(i) {
  1966. fp = &edev->fp_array[i];
  1967. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  1968. if (!fp->sb_info) {
  1969. DP_NOTICE(edev, "sb info struct allocation failed\n");
  1970. goto err;
  1971. }
  1972. if (fp_rx) {
  1973. fp->type = QEDE_FASTPATH_RX;
  1974. fp_rx--;
  1975. } else if (fp_combined) {
  1976. fp->type = QEDE_FASTPATH_COMBINED;
  1977. fp_combined--;
  1978. } else {
  1979. fp->type = QEDE_FASTPATH_TX;
  1980. }
  1981. if (fp->type & QEDE_FASTPATH_TX) {
  1982. fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
  1983. GFP_KERNEL);
  1984. if (!fp->txqs) {
  1985. DP_NOTICE(edev,
  1986. "TXQ array allocation failed\n");
  1987. goto err;
  1988. }
  1989. }
  1990. if (fp->type & QEDE_FASTPATH_RX) {
  1991. fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
  1992. if (!fp->rxq) {
  1993. DP_NOTICE(edev,
  1994. "RXQ struct allocation failed\n");
  1995. goto err;
  1996. }
  1997. }
  1998. }
  1999. return 0;
  2000. err:
  2001. qede_free_fp_array(edev);
  2002. return -ENOMEM;
  2003. }
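/* Illustrative sketch, not part of the driver: qede_alloc_fp_array() hands
 * out fastpath types in a fixed order - pure Rx first, then combined, then
 * pure Tx - so queues of the same kind end up contiguous with sequential
 * ids. Toy version of that assignment; the counts below are hypothetical.
 */
#include <stdio.h>

enum demo_fp_type { DEMO_FP_RX = 1, DEMO_FP_TX = 2,
                    DEMO_FP_COMBINED = DEMO_FP_RX | DEMO_FP_TX };

int main(void)
{
        int num_rx = 2, num_tx = 1, total = 5;  /* 5 - 2 - 1 = 2 combined */
        int combined = total - num_rx - num_tx;

        for (int i = 0; i < total; i++) {
                enum demo_fp_type type;

                if (num_rx) {
                        type = DEMO_FP_RX;
                        num_rx--;
                } else if (combined) {
                        type = DEMO_FP_COMBINED;
                        combined--;
                } else {
                        type = DEMO_FP_TX;
                }
                printf("fp[%d] type=%d\n", i, type);
        }
        return 0;
}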
  2004. static void qede_sp_task(struct work_struct *work)
  2005. {
  2006. struct qede_dev *edev = container_of(work, struct qede_dev,
  2007. sp_task.work);
  2008. struct qed_dev *cdev = edev->cdev;
  2009. mutex_lock(&edev->qede_lock);
  2010. if (edev->state == QEDE_STATE_OPEN) {
  2011. if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
  2012. qede_config_rx_mode(edev->ndev);
  2013. }
  2014. if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
  2015. struct qed_tunn_params tunn_params;
  2016. memset(&tunn_params, 0, sizeof(tunn_params));
  2017. tunn_params.update_vxlan_port = 1;
  2018. tunn_params.vxlan_port = edev->vxlan_dst_port;
  2019. qed_ops->tunn_config(cdev, &tunn_params);
  2020. }
  2021. if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
  2022. struct qed_tunn_params tunn_params;
  2023. memset(&tunn_params, 0, sizeof(tunn_params));
  2024. tunn_params.update_geneve_port = 1;
  2025. tunn_params.geneve_port = edev->geneve_dst_port;
  2026. qed_ops->tunn_config(cdev, &tunn_params);
  2027. }
  2028. mutex_unlock(&edev->qede_lock);
  2029. }
  2030. static void qede_update_pf_params(struct qed_dev *cdev)
  2031. {
  2032. struct qed_pf_params pf_params;
  2033. /* 64 rx + 64 tx */
  2034. memset(&pf_params, 0, sizeof(struct qed_pf_params));
  2035. pf_params.eth_pf_params.num_cons = 128;
  2036. qed_ops->common->update_pf_params(cdev, &pf_params);
  2037. }
  2038. enum qede_probe_mode {
  2039. QEDE_PROBE_NORMAL,
  2040. };
  2041. static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
  2042. bool is_vf, enum qede_probe_mode mode)
  2043. {
  2044. struct qed_probe_params probe_params;
  2045. struct qed_slowpath_params sp_params;
  2046. struct qed_dev_eth_info dev_info;
  2047. struct qede_dev *edev;
  2048. struct qed_dev *cdev;
  2049. int rc;
  2050. if (unlikely(dp_level & QED_LEVEL_INFO))
  2051. pr_notice("Starting qede probe\n");
  2052. memset(&probe_params, 0, sizeof(probe_params));
  2053. probe_params.protocol = QED_PROTOCOL_ETH;
  2054. probe_params.dp_module = dp_module;
  2055. probe_params.dp_level = dp_level;
  2056. probe_params.is_vf = is_vf;
  2057. cdev = qed_ops->common->probe(pdev, &probe_params);
  2058. if (!cdev) {
  2059. rc = -ENODEV;
  2060. goto err0;
  2061. }
  2062. qede_update_pf_params(cdev);
  2063. /* Start the Slowpath-process */
  2064. memset(&sp_params, 0, sizeof(sp_params));
  2065. sp_params.int_mode = QED_INT_MODE_MSIX;
  2066. sp_params.drv_major = QEDE_MAJOR_VERSION;
  2067. sp_params.drv_minor = QEDE_MINOR_VERSION;
  2068. sp_params.drv_rev = QEDE_REVISION_VERSION;
  2069. sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
  2070. strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
  2071. rc = qed_ops->common->slowpath_start(cdev, &sp_params);
  2072. if (rc) {
  2073. pr_notice("Cannot start slowpath\n");
  2074. goto err1;
  2075. }
  2076. /* Learn information crucial for qede to progress */
  2077. rc = qed_ops->fill_dev_info(cdev, &dev_info);
  2078. if (rc)
  2079. goto err2;
  2080. edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
  2081. dp_level);
  2082. if (!edev) {
  2083. rc = -ENOMEM;
  2084. goto err2;
  2085. }
  2086. if (is_vf)
  2087. edev->flags |= QEDE_FLAG_IS_VF;
  2088. qede_init_ndev(edev);
  2089. rc = register_netdev(edev->ndev);
  2090. if (rc) {
  2091. DP_NOTICE(edev, "Cannot register net-device\n");
  2092. goto err3;
  2093. }
  2094. edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
  2095. edev->ops->register_ops(cdev, &qede_ll_ops, edev);
  2096. #ifdef CONFIG_DCB
  2097. if (!IS_VF(edev))
  2098. qede_set_dcbnl_ops(edev->ndev);
  2099. #endif
  2100. INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
  2101. mutex_init(&edev->qede_lock);
  2102. edev->rx_copybreak = QEDE_RX_HDR_SIZE;
  2103. DP_INFO(edev, "Ending successfully qede probe\n");
  2104. return 0;
  2105. err3:
  2106. free_netdev(edev->ndev);
  2107. err2:
  2108. qed_ops->common->slowpath_stop(cdev);
  2109. err1:
  2110. qed_ops->common->remove(cdev);
  2111. err0:
  2112. return rc;
  2113. }
  2114. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  2115. {
  2116. bool is_vf = false;
  2117. u32 dp_module = 0;
  2118. u8 dp_level = 0;
  2119. switch ((enum qede_pci_private)id->driver_data) {
  2120. case QEDE_PRIVATE_VF:
  2121. if (debug & QED_LOG_VERBOSE_MASK)
  2122. dev_err(&pdev->dev, "Probing a VF\n");
  2123. is_vf = true;
  2124. break;
  2125. default:
  2126. if (debug & QED_LOG_VERBOSE_MASK)
  2127. dev_err(&pdev->dev, "Probing a PF\n");
  2128. }
  2129. qede_config_debug(debug, &dp_module, &dp_level);
  2130. return __qede_probe(pdev, dp_module, dp_level, is_vf,
  2131. QEDE_PROBE_NORMAL);
  2132. }
  2133. enum qede_remove_mode {
  2134. QEDE_REMOVE_NORMAL,
  2135. };
  2136. static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
  2137. {
  2138. struct net_device *ndev = pci_get_drvdata(pdev);
  2139. struct qede_dev *edev = netdev_priv(ndev);
  2140. struct qed_dev *cdev = edev->cdev;
  2141. DP_INFO(edev, "Starting qede_remove\n");
  2142. cancel_delayed_work_sync(&edev->sp_task);
  2143. unregister_netdev(ndev);
  2144. edev->ops->common->set_power_state(cdev, PCI_D0);
  2145. pci_set_drvdata(pdev, NULL);
  2146. free_netdev(ndev);
  2147. /* Use global ops since we've freed edev */
  2148. qed_ops->common->slowpath_stop(cdev);
  2149. qed_ops->common->remove(cdev);
  2150. dev_info(&pdev->dev, "Ending qede_remove successfully\n");
  2151. }
  2152. static void qede_remove(struct pci_dev *pdev)
  2153. {
  2154. __qede_remove(pdev, QEDE_REMOVE_NORMAL);
  2155. }
  2156. /* -------------------------------------------------------------------------
  2157. * START OF LOAD / UNLOAD
  2158. * -------------------------------------------------------------------------
  2159. */
  2160. static int qede_set_num_queues(struct qede_dev *edev)
  2161. {
  2162. int rc;
  2163. u16 rss_num;
2164. /* Set up queues according to available resources */
  2165. if (edev->req_queues)
  2166. rss_num = edev->req_queues;
  2167. else
  2168. rss_num = netif_get_num_default_rss_queues() *
  2169. edev->dev_info.common.num_hwfns;
  2170. rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
  2171. rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
  2172. if (rc > 0) {
  2173. /* Managed to request interrupts for our queues */
  2174. edev->num_queues = rc;
  2175. DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
  2176. QEDE_QUEUE_CNT(edev), rss_num);
  2177. rc = 0;
  2178. }
  2179. edev->fp_num_tx = edev->req_num_tx;
  2180. edev->fp_num_rx = edev->req_num_rx;
  2181. return rc;
  2182. }
  2183. static void qede_free_mem_sb(struct qede_dev *edev,
  2184. struct qed_sb_info *sb_info)
  2185. {
  2186. if (sb_info->sb_virt)
  2187. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
  2188. (void *)sb_info->sb_virt, sb_info->sb_phys);
  2189. }
  2190. /* This function allocates fast-path status block memory */
  2191. static int qede_alloc_mem_sb(struct qede_dev *edev,
  2192. struct qed_sb_info *sb_info, u16 sb_id)
  2193. {
  2194. struct status_block *sb_virt;
  2195. dma_addr_t sb_phys;
  2196. int rc;
  2197. sb_virt = dma_alloc_coherent(&edev->pdev->dev,
  2198. sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
  2199. if (!sb_virt) {
  2200. DP_ERR(edev, "Status block allocation failed\n");
  2201. return -ENOMEM;
  2202. }
  2203. rc = edev->ops->common->sb_init(edev->cdev, sb_info,
  2204. sb_virt, sb_phys, sb_id,
  2205. QED_SB_TYPE_L2_QUEUE);
  2206. if (rc) {
  2207. DP_ERR(edev, "Status block initialization failed\n");
  2208. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
  2209. sb_virt, sb_phys);
  2210. return rc;
  2211. }
  2212. return 0;
  2213. }
  2214. static void qede_free_rx_buffers(struct qede_dev *edev,
  2215. struct qede_rx_queue *rxq)
  2216. {
  2217. u16 i;
  2218. for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
  2219. struct sw_rx_data *rx_buf;
  2220. struct page *data;
  2221. rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
  2222. data = rx_buf->data;
  2223. dma_unmap_page(&edev->pdev->dev,
  2224. rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
  2225. rx_buf->data = NULL;
  2226. __free_page(data);
  2227. }
  2228. }
  2229. static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2230. {
  2231. int i;
  2232. if (edev->gro_disable)
  2233. return;
  2234. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  2235. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  2236. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  2237. if (replace_buf->data) {
  2238. dma_unmap_page(&edev->pdev->dev,
  2239. replace_buf->mapping,
  2240. PAGE_SIZE, DMA_FROM_DEVICE);
  2241. __free_page(replace_buf->data);
  2242. }
  2243. }
  2244. }
  2245. static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2246. {
  2247. qede_free_sge_mem(edev, rxq);
  2248. /* Free rx buffers */
  2249. qede_free_rx_buffers(edev, rxq);
  2250. /* Free the parallel SW ring */
  2251. kfree(rxq->sw_rx_ring);
  2252. /* Free the real RQ ring used by FW */
  2253. edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
  2254. edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
  2255. }
  2256. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  2257. struct qede_rx_queue *rxq)
  2258. {
  2259. struct sw_rx_data *sw_rx_data;
  2260. struct eth_rx_bd *rx_bd;
  2261. dma_addr_t mapping;
  2262. struct page *data;
  2263. data = alloc_pages(GFP_ATOMIC, 0);
  2264. if (unlikely(!data)) {
  2265. DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
  2266. return -ENOMEM;
  2267. }
2268. /* Map the entire page, as it will be split into
2269. * multiple RX buffer segments of rx_buf_seg_size.
2270. */
  2271. mapping = dma_map_page(&edev->pdev->dev, data, 0,
  2272. PAGE_SIZE, DMA_FROM_DEVICE);
  2273. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  2274. __free_page(data);
  2275. DP_NOTICE(edev, "Failed to map Rx buffer\n");
  2276. return -ENOMEM;
  2277. }
  2278. sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  2279. sw_rx_data->page_offset = 0;
  2280. sw_rx_data->data = data;
  2281. sw_rx_data->mapping = mapping;
  2282. /* Advance PROD and get BD pointer */
  2283. rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
  2284. WARN_ON(!rx_bd);
  2285. rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  2286. rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  2287. rxq->sw_rx_prod++;
  2288. return 0;
  2289. }
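/* Illustrative sketch, not part of the driver: the Rx BD above stores the
 * 64-bit DMA address as two little-endian 32-bit halves. Standalone demo of
 * the same upper/lower split; the address and helper names are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t demo_lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
        uint64_t dma = 0x0000001234abcd00ULL;   /* hypothetical bus address */
        uint32_t hi = demo_upper_32_bits(dma);
        uint32_t lo = demo_lower_32_bits(dma);

        printf("hi=0x%08x lo=0x%08x rebuilt=0x%016llx\n", hi, lo,
               (unsigned long long)(((uint64_t)hi << 32) | lo));
        return 0;
}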
  2290. static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2291. {
  2292. dma_addr_t mapping;
  2293. int i;
  2294. if (edev->gro_disable)
  2295. return 0;
  2296. if (edev->ndev->mtu > PAGE_SIZE) {
  2297. edev->gro_disable = 1;
  2298. return 0;
  2299. }
  2300. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  2301. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  2302. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  2303. replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
  2304. if (unlikely(!replace_buf->data)) {
  2305. DP_NOTICE(edev,
  2306. "Failed to allocate TPA skb pool [replacement buffer]\n");
  2307. goto err;
  2308. }
  2309. mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
  2310. rxq->rx_buf_size, DMA_FROM_DEVICE);
  2311. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  2312. DP_NOTICE(edev,
  2313. "Failed to map TPA replacement buffer\n");
  2314. goto err;
  2315. }
  2316. replace_buf->mapping = mapping;
  2317. tpa_info->replace_buf.page_offset = 0;
  2318. tpa_info->replace_buf_mapping = mapping;
  2319. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  2320. }
  2321. return 0;
  2322. err:
  2323. qede_free_sge_mem(edev, rxq);
  2324. edev->gro_disable = 1;
  2325. return -ENOMEM;
  2326. }
  2327. /* This function allocates all memory needed per Rx queue */
  2328. static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2329. {
  2330. int i, rc, size;
  2331. rxq->num_rx_buffers = edev->q_num_rx_buffers;
  2332. rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
  2333. if (rxq->rx_buf_size > PAGE_SIZE)
  2334. rxq->rx_buf_size = PAGE_SIZE;
2335. /* Segment size to split a page into multiple equal parts */
  2336. rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
  2337. /* Allocate the parallel driver ring for Rx buffers */
  2338. size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
  2339. rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
  2340. if (!rxq->sw_rx_ring) {
  2341. DP_ERR(edev, "Rx buffers ring allocation failed\n");
  2342. rc = -ENOMEM;
  2343. goto err;
  2344. }
  2345. /* Allocate FW Rx ring */
  2346. rc = edev->ops->common->chain_alloc(edev->cdev,
  2347. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  2348. QED_CHAIN_MODE_NEXT_PTR,
  2349. QED_CHAIN_CNT_TYPE_U16,
  2350. RX_RING_SIZE,
  2351. sizeof(struct eth_rx_bd),
  2352. &rxq->rx_bd_ring);
  2353. if (rc)
  2354. goto err;
  2355. /* Allocate FW completion ring */
  2356. rc = edev->ops->common->chain_alloc(edev->cdev,
  2357. QED_CHAIN_USE_TO_CONSUME,
  2358. QED_CHAIN_MODE_PBL,
  2359. QED_CHAIN_CNT_TYPE_U16,
  2360. RX_RING_SIZE,
  2361. sizeof(union eth_rx_cqe),
  2362. &rxq->rx_comp_ring);
  2363. if (rc)
  2364. goto err;
  2365. /* Allocate buffers for the Rx ring */
  2366. for (i = 0; i < rxq->num_rx_buffers; i++) {
  2367. rc = qede_alloc_rx_buffer(edev, rxq);
  2368. if (rc) {
  2369. DP_ERR(edev,
  2370. "Rx buffers allocation failed at index %d\n", i);
  2371. goto err;
  2372. }
  2373. }
  2374. rc = qede_alloc_sge_mem(edev, rxq);
  2375. err:
  2376. return rc;
  2377. }
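/* Illustrative sketch, not part of the driver: qede_alloc_mem_rxq() rounds
 * the per-MTU buffer size up to a power of two so that one page splits into
 * equal segments. Standalone demo of that sizing math; the 4K page size and
 * the "2 + 14 + 4" header overhead used here are rough stand-ins for
 * NET_IP_ALIGN + ETH_OVERHEAD, i.e. assumptions for this example only.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096U

/* Round v up to the next power of two (v > 0) */
static unsigned int demo_roundup_pow_of_two(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int mtus[] = { 1500, 4000, 9000 };

        for (unsigned int i = 0; i < 3; i++) {
                unsigned int buf = 2 + 14 + 4 + mtus[i];
                unsigned int seg;

                if (buf > DEMO_PAGE_SIZE)
                        buf = DEMO_PAGE_SIZE;
                seg = demo_roundup_pow_of_two(buf);
                printf("mtu %u -> buf %u -> segment %u (%u per page)\n",
                       mtus[i], buf, seg, DEMO_PAGE_SIZE / seg);
        }
        return 0;
}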
  2378. static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  2379. {
  2380. /* Free the parallel SW ring */
  2381. kfree(txq->sw_tx_ring);
2382. /* Free the real Tx ring used by FW */
  2383. edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
  2384. }
  2385. /* This function allocates all memory needed per Tx queue */
  2386. static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  2387. {
  2388. int size, rc;
  2389. union eth_tx_bd_types *p_virt;
  2390. txq->num_tx_buffers = edev->q_num_tx_buffers;
  2391. /* Allocate the parallel driver ring for Tx buffers */
  2392. size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
  2393. txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
  2394. if (!txq->sw_tx_ring) {
  2395. DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
  2396. goto err;
  2397. }
  2398. rc = edev->ops->common->chain_alloc(edev->cdev,
  2399. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  2400. QED_CHAIN_MODE_PBL,
  2401. QED_CHAIN_CNT_TYPE_U16,
  2402. NUM_TX_BDS_MAX,
  2403. sizeof(*p_virt), &txq->tx_pbl);
  2404. if (rc)
  2405. goto err;
  2406. return 0;
  2407. err:
  2408. qede_free_mem_txq(edev, txq);
  2409. return -ENOMEM;
  2410. }
  2411. /* This function frees all memory of a single fp */
  2412. static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  2413. {
  2414. int tc;
  2415. qede_free_mem_sb(edev, fp->sb_info);
  2416. if (fp->type & QEDE_FASTPATH_RX)
  2417. qede_free_mem_rxq(edev, fp->rxq);
  2418. if (fp->type & QEDE_FASTPATH_TX)
  2419. for (tc = 0; tc < edev->num_tc; tc++)
  2420. qede_free_mem_txq(edev, &fp->txqs[tc]);
  2421. }
2422. /* This function allocates all memory needed for a single fp (i.e. an entity
2423. * which contains a status block, one rx queue and/or multiple per-TC tx queues).
  2424. */
  2425. static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  2426. {
  2427. int rc, tc;
  2428. rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
  2429. if (rc)
  2430. goto err;
  2431. if (fp->type & QEDE_FASTPATH_RX) {
  2432. rc = qede_alloc_mem_rxq(edev, fp->rxq);
  2433. if (rc)
  2434. goto err;
  2435. }
  2436. if (fp->type & QEDE_FASTPATH_TX) {
  2437. for (tc = 0; tc < edev->num_tc; tc++) {
  2438. rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
  2439. if (rc)
  2440. goto err;
  2441. }
  2442. }
  2443. return 0;
  2444. err:
  2445. return rc;
  2446. }
  2447. static void qede_free_mem_load(struct qede_dev *edev)
  2448. {
  2449. int i;
  2450. for_each_queue(i) {
  2451. struct qede_fastpath *fp = &edev->fp_array[i];
  2452. qede_free_mem_fp(edev, fp);
  2453. }
  2454. }
  2455. /* This function allocates all qede memory at NIC load. */
  2456. static int qede_alloc_mem_load(struct qede_dev *edev)
  2457. {
  2458. int rc = 0, queue_id;
  2459. for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
  2460. struct qede_fastpath *fp = &edev->fp_array[queue_id];
  2461. rc = qede_alloc_mem_fp(edev, fp);
  2462. if (rc) {
  2463. DP_ERR(edev,
  2464. "Failed to allocate memory for fastpath - rss id = %d\n",
  2465. queue_id);
  2466. qede_free_mem_load(edev);
  2467. return rc;
  2468. }
  2469. }
  2470. return 0;
  2471. }
  2472. /* This function inits fp content and resets the SB, RXQ and TXQ structures */
  2473. static void qede_init_fp(struct qede_dev *edev)
  2474. {
  2475. int queue_id, rxq_index = 0, txq_index = 0, tc;
  2476. struct qede_fastpath *fp;
  2477. for_each_queue(queue_id) {
  2478. fp = &edev->fp_array[queue_id];
  2479. fp->edev = edev;
  2480. fp->id = queue_id;
  2481. memset((void *)&fp->napi, 0, sizeof(fp->napi));
  2482. memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
  2483. if (fp->type & QEDE_FASTPATH_RX) {
  2484. memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
  2485. fp->rxq->rxq_id = rxq_index++;
  2486. }
  2487. if (fp->type & QEDE_FASTPATH_TX) {
  2488. memset((void *)fp->txqs, 0,
  2489. (edev->num_tc * sizeof(*fp->txqs)));
  2490. for (tc = 0; tc < edev->num_tc; tc++) {
  2491. fp->txqs[tc].index = txq_index +
  2492. tc * QEDE_TSS_COUNT(edev);
  2493. if (edev->dev_info.is_legacy)
  2494. fp->txqs[tc].is_legacy = true;
  2495. }
  2496. txq_index++;
  2497. }
  2498. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  2499. edev->ndev->name, queue_id);
  2500. }
  2501. edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
  2502. }
  2503. static int qede_set_real_num_queues(struct qede_dev *edev)
  2504. {
  2505. int rc = 0;
  2506. rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
  2507. if (rc) {
  2508. DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
  2509. return rc;
  2510. }
  2511. rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
  2512. if (rc) {
  2513. DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
  2514. return rc;
  2515. }
  2516. return 0;
  2517. }
  2518. static void qede_napi_disable_remove(struct qede_dev *edev)
  2519. {
  2520. int i;
  2521. for_each_queue(i) {
  2522. napi_disable(&edev->fp_array[i].napi);
  2523. netif_napi_del(&edev->fp_array[i].napi);
  2524. }
  2525. }
  2526. static void qede_napi_add_enable(struct qede_dev *edev)
  2527. {
  2528. int i;
  2529. /* Add NAPI objects */
  2530. for_each_queue(i) {
  2531. netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
  2532. qede_poll, NAPI_POLL_WEIGHT);
  2533. napi_enable(&edev->fp_array[i].napi);
  2534. }
  2535. }
  2536. static void qede_sync_free_irqs(struct qede_dev *edev)
  2537. {
  2538. int i;
  2539. for (i = 0; i < edev->int_info.used_cnt; i++) {
  2540. if (edev->int_info.msix_cnt) {
  2541. synchronize_irq(edev->int_info.msix[i].vector);
  2542. free_irq(edev->int_info.msix[i].vector,
  2543. &edev->fp_array[i]);
  2544. } else {
  2545. edev->ops->common->simd_handler_clean(edev->cdev, i);
  2546. }
  2547. }
  2548. edev->int_info.used_cnt = 0;
  2549. }
  2550. static int qede_req_msix_irqs(struct qede_dev *edev)
  2551. {
  2552. int i, rc;
2553. /* Sanity-check that we have enough MSI-X vectors for the prepared RSS queues */
  2554. if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
  2555. DP_ERR(edev,
  2556. "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
  2557. QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
  2558. return -EINVAL;
  2559. }
  2560. for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
  2561. rc = request_irq(edev->int_info.msix[i].vector,
  2562. qede_msix_fp_int, 0, edev->fp_array[i].name,
  2563. &edev->fp_array[i]);
  2564. if (rc) {
  2565. DP_ERR(edev, "Request fp %d irq failed\n", i);
  2566. qede_sync_free_irqs(edev);
  2567. return rc;
  2568. }
  2569. DP_VERBOSE(edev, NETIF_MSG_INTR,
  2570. "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
  2571. edev->fp_array[i].name, i,
  2572. &edev->fp_array[i]);
  2573. edev->int_info.used_cnt++;
  2574. }
  2575. return 0;
  2576. }
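/* Callback used when MSI-X is unavailable; simply schedules NAPI for the
 * fastpath passed in as the cookie.
 */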
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

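/* Learn the interrupt configuration from qed and attach handlers - MSI-X
 * when available, otherwise the SIMD callback path.
 */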
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

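/* Poll until the Tx queue's software producer and consumer meet. If the
 * queue appears stuck, optionally ask the MCP to drain it once and retry.
 */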
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

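/* Tear down the datapath: deactivate the vport, drain all Tx queues, stop
 * every Tx and Rx queue in reverse order, and finally stop the vport.
 */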
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qede_tx_queue *txq = &fp->txqs[tc];

				rc = qede_drain_txq(edev, txq, true);
				if (rc)
					return rc;
			}
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qed_stop_txq_params tx_params;
				u8 val;

				tx_params.rss_id = i;
				val = edev->fp_array[i].txqs[tc].index;
				tx_params.tx_queue_id = val;
				rc = edev->ops->q_tx_stop(cdev, &tx_params);
				if (rc) {
					DP_ERR(edev, "Failed to stop TXQ #%d\n",
					       tx_params.tx_queue_id);
					return rc;
				}
			}
		}

		/* Stop the Rx Queue */
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			memset(&rx_params, 0, sizeof(rx_params));
			rx_params.rss_id = i;
			rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;

			rc = edev->ops->q_rx_stop(cdev, &rx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

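/* Bring up the datapath: start the vport, start every Rx/Tx queue and program
 * its doorbell, then enable the vport and configure RSS (indirection table,
 * hash key and capabilities) when more than one Rx queue exists.
 */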
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int rc, tc, i;
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_update_vport_params vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_start_vport_params start = {0};
	bool reset_rss_indir = false;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-PORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt,
						   &rxq->hw_rxq_prod_addr);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				return rc;
			}

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq->index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   p_phys_table, page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq->index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;
	}

	/* Fill struct with RSS params */
	if (QEDE_RSS_COUNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;

		/* Need to validate current RSS config uses valid entries */
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			if (edev->rss_params.rss_ind_table[i] >=
			    QEDE_RSS_COUNT(edev)) {
				reset_rss_indir = true;
				break;
			}
		}

		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
		    reset_rss_indir) {
			u16 val;

			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
				u16 indir_val;

				val = QEDE_RSS_COUNT(edev);
				indir_val = ethtool_rxfh_indir_default(i, val);
				edev->rss_params.rss_ind_table[i] = indir_val;
			}
			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
			netdev_rss_key_fill(edev->rss_params.rss_key,
					    sizeof(edev->rss_params.rss_key));
			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
			edev->rss_params.rss_caps = QED_RSS_IPV4 |
						    QED_RSS_IPV6 |
						    QED_RSS_IPV4_TCP |
						    QED_RSS_IPV6_TCP;
			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
		}

		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
	} else {
		memset(&vport_update_params.rss_params, 0,
		       sizeof(vport_update_params.rss_params));
	}

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

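/* Build and submit a multicast filter command for the given opcode, copying
 * up to 'num_macs' addresses from a flat MAC array.
 */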
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

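/* Stop the datapath and release its resources. Only a normal unload is
 * currently defined (see enum qede_unload_mode above).
 */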
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};

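/* Allocate and initialize the fastpath array, NAPI and IRQs, start the queues
 * and request link-up. On QEDE_LOAD_RELOAD the statistics are preserved
 * across the restart.
 */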
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;

	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_QUEUE_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending qede load successfully\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
err0:
	return rc;
}

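/* Unload, let the caller adjust parameters through 'func', then load again
 * with QEDE_LOAD_RELOAD and re-apply the Rx mode under qede_lock.
 */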
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_RELOAD);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}

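/* Link-change notification from qed; starts or stops the Tx queues and
 * toggles the carrier to match the reported link state.
 */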
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}

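/* ndo_set_mac_address handler: validate the new address, and if the
 * interface is running, swap the primary MAC filter over to it.
 */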
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC\n");
		return -EINVAL;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}

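/* Re-program the multicast filters from the netdev's MC list. With
 * IFF_ALLMULTI set or more than 64 entries, fall back to multicast-promiscuous
 * mode via 'accept_flags'.
 */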
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

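/* ndo_set_rx_mode handler; when the interface is open, defers the actual
 * filter configuration to the sp_task delayed work.
 */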
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}

/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags =
	    QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}