qede_main.c

  1. /* QLogic qede NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/version.h>
  11. #include <linux/device.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/etherdevice.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/errno.h>
  16. #include <linux/list.h>
  17. #include <linux/string.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/interrupt.h>
  20. #include <asm/byteorder.h>
  21. #include <asm/param.h>
  22. #include <linux/io.h>
  23. #include <linux/netdev_features.h>
  24. #include <linux/udp.h>
  25. #include <linux/tcp.h>
  26. #include <net/vxlan.h>
  27. #include <linux/ip.h>
  28. #include <net/ipv6.h>
  29. #include <net/tcp.h>
  30. #include <linux/if_ether.h>
  31. #include <linux/if_vlan.h>
  32. #include <linux/pkt_sched.h>
  33. #include <linux/ethtool.h>
  34. #include <linux/in.h>
  35. #include <linux/random.h>
  36. #include <net/ip6_checksum.h>
  37. #include <linux/bitops.h>
  38. #include "qede.h"
  39. static char version[] =
  40. "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
  41. MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
  42. MODULE_LICENSE("GPL");
  43. MODULE_VERSION(DRV_MODULE_VERSION);
  44. static uint debug;
  45. module_param(debug, uint, 0);
  46. MODULE_PARM_DESC(debug, " Default debug msglevel");
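/* Note: 'debug' is presumably consumed as the driver's default msglevel
 * bitmask (the standard NETIF_MSG_* bits), so something like
 * 'modprobe qede debug=0x1f' would enable the basic message groups.
 * The example value is illustrative, not taken from this file.
 */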
  47. static const struct qed_eth_ops *qed_ops;
  48. #define CHIP_NUM_57980S_40 0x1634
  49. #define CHIP_NUM_57980S_10 0x1666
  50. #define CHIP_NUM_57980S_MF 0x1636
  51. #define CHIP_NUM_57980S_100 0x1644
  52. #define CHIP_NUM_57980S_50 0x1654
  53. #define CHIP_NUM_57980S_25 0x1656
  54. #ifndef PCI_DEVICE_ID_NX2_57980E
  55. #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
  56. #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
  57. #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
  58. #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
  59. #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
  60. #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
  61. #endif
  62. static const struct pci_device_id qede_pci_tbl[] = {
  63. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
  64. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
  65. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
  66. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
  67. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
  68. { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
  69. { 0 }
  70. };
  71. MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
  72. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  73. #define TX_TIMEOUT (5 * HZ)
  74. static void qede_remove(struct pci_dev *pdev);
  75. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  76. struct qede_rx_queue *rxq);
  77. static void qede_link_update(void *dev, struct qed_link_output *link);
  78. static struct pci_driver qede_pci_driver = {
  79. .name = "qede",
  80. .id_table = qede_pci_tbl,
  81. .probe = qede_probe,
  82. .remove = qede_remove,
  83. };
  84. static struct qed_eth_cb_ops qede_ll_ops = {
  85. {
  86. .link_update = qede_link_update,
  87. },
  88. };
  89. static int qede_netdev_event(struct notifier_block *this, unsigned long event,
  90. void *ptr)
  91. {
  92. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  93. struct ethtool_drvinfo drvinfo;
  94. struct qede_dev *edev;
  95. /* Currently only support name change */
  96. if (event != NETDEV_CHANGENAME)
  97. goto done;
  98. /* Check whether this is a qede device */
  99. if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
  100. goto done;
  101. memset(&drvinfo, 0, sizeof(drvinfo));
  102. ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
  103. if (strcmp(drvinfo.driver, "qede"))
  104. goto done;
  105. edev = netdev_priv(ndev);
  106. /* Notify qed of the name change */
  107. if (!edev->ops || !edev->ops->common)
  108. goto done;
  109. edev->ops->common->set_id(edev->cdev, edev->ndev->name,
  110. "qede");
  111. done:
  112. return NOTIFY_DONE;
  113. }
  114. static struct notifier_block qede_netdev_notifier = {
  115. .notifier_call = qede_netdev_event,
  116. };
  117. static
  118. int __init qede_init(void)
  119. {
  120. int ret;
  121. u32 qed_ver;
  122. pr_notice("qede_init: %s\n", version);
  123. qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
  124. if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
  125. pr_notice("Version mismatch [%08x != %08x]\n",
  126. qed_ver,
  127. QEDE_ETH_INTERFACE_VERSION);
  128. return -EINVAL;
  129. }
  130. qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
  131. if (!qed_ops) {
  132. pr_notice("Failed to get qed ethtool operations\n");
  133. return -EINVAL;
  134. }
  135. /* Must register notifier before pci ops, since we might miss
  136. * interface rename after pci probe and netdev registration.
  137. */
  138. ret = register_netdevice_notifier(&qede_netdev_notifier);
  139. if (ret) {
  140. pr_notice("Failed to register netdevice_notifier\n");
  141. qed_put_eth_ops();
  142. return -EINVAL;
  143. }
  144. ret = pci_register_driver(&qede_pci_driver);
  145. if (ret) {
  146. pr_notice("Failed to register driver\n");
  147. unregister_netdevice_notifier(&qede_netdev_notifier);
  148. qed_put_eth_ops();
  149. return -EINVAL;
  150. }
  151. return 0;
  152. }
  153. static void __exit qede_cleanup(void)
  154. {
  155. pr_notice("qede_cleanup called\n");
  156. unregister_netdevice_notifier(&qede_netdev_notifier);
  157. pci_unregister_driver(&qede_pci_driver);
  158. qed_put_eth_ops();
  159. }
  160. module_init(qede_init);
  161. module_exit(qede_cleanup);
  162. /* -------------------------------------------------------------------------
  163. * START OF FAST-PATH
  164. * -------------------------------------------------------------------------
  165. */
  166. /* Unmap the data and free skb */
  167. static int qede_free_tx_pkt(struct qede_dev *edev,
  168. struct qede_tx_queue *txq,
  169. int *len)
  170. {
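/* sw_tx_cons/sw_tx_prod are free-running counters; masking with
 * NUM_TX_BDS_MAX (assumed here to be the TX ring size minus one, i.e. a
 * power-of-two mask) turns them into ring-slot indices.
 */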
  171. u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
  172. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  173. struct eth_tx_1st_bd *first_bd;
  174. struct eth_tx_bd *tx_data_bd;
  175. int bds_consumed = 0;
  176. int nbds;
  177. bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
  178. int i, split_bd_len = 0;
  179. if (unlikely(!skb)) {
  180. DP_ERR(edev,
  181. "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
  182. idx, txq->sw_tx_cons, txq->sw_tx_prod);
  183. return -1;
  184. }
  185. *len = skb->len;
  186. first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
  187. bds_consumed++;
  188. nbds = first_bd->data.nbds;
  189. if (data_split) {
  190. struct eth_tx_bd *split = (struct eth_tx_bd *)
  191. qed_chain_consume(&txq->tx_pbl);
  192. split_bd_len = BD_UNMAP_LEN(split);
  193. bds_consumed++;
  194. }
  195. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  196. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  197. /* Unmap the data of the skb frags */
  198. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
  199. tx_data_bd = (struct eth_tx_bd *)
  200. qed_chain_consume(&txq->tx_pbl);
  201. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
  202. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  203. }
  204. while (bds_consumed++ < nbds)
  205. qed_chain_consume(&txq->tx_pbl);
  206. /* Free skb */
  207. dev_kfree_skb_any(skb);
  208. txq->sw_tx_ring[idx].skb = NULL;
  209. txq->sw_tx_ring[idx].flags = 0;
  210. return 0;
  211. }
  212. /* Unmap the data and free skb when mapping failed during start_xmit */
  213. static void qede_free_failed_tx_pkt(struct qede_dev *edev,
  214. struct qede_tx_queue *txq,
  215. struct eth_tx_1st_bd *first_bd,
  216. int nbd,
  217. bool data_split)
  218. {
  219. u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  220. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  221. struct eth_tx_bd *tx_data_bd;
  222. int i, split_bd_len = 0;
  223. /* Return prod to its position before this skb was handled */
  224. qed_chain_set_prod(&txq->tx_pbl,
  225. le16_to_cpu(txq->tx_db.data.bd_prod),
  226. first_bd);
  227. first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
  228. if (data_split) {
  229. struct eth_tx_bd *split = (struct eth_tx_bd *)
  230. qed_chain_produce(&txq->tx_pbl);
  231. split_bd_len = BD_UNMAP_LEN(split);
  232. nbd--;
  233. }
  234. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  235. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  236. /* Unmap the data of the skb frags */
  237. for (i = 0; i < nbd; i++) {
  238. tx_data_bd = (struct eth_tx_bd *)
  239. qed_chain_produce(&txq->tx_pbl);
  240. if (tx_data_bd->nbytes)
  241. dma_unmap_page(&edev->pdev->dev,
  242. BD_UNMAP_ADDR(tx_data_bd),
  243. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  244. }
  245. /* Return again prod to its position before this skb was handled */
  246. qed_chain_set_prod(&txq->tx_pbl,
  247. le16_to_cpu(txq->tx_db.data.bd_prod),
  248. first_bd);
  249. /* Free skb */
  250. dev_kfree_skb_any(skb);
  251. txq->sw_tx_ring[idx].skb = NULL;
  252. txq->sw_tx_ring[idx].flags = 0;
  253. }
  254. static u32 qede_xmit_type(struct qede_dev *edev,
  255. struct sk_buff *skb,
  256. int *ipv6_ext)
  257. {
  258. u32 rc = XMIT_L4_CSUM;
  259. __be16 l3_proto;
  260. if (skb->ip_summed != CHECKSUM_PARTIAL)
  261. return XMIT_PLAIN;
  262. l3_proto = vlan_get_protocol(skb);
  263. if (l3_proto == htons(ETH_P_IPV6) &&
  264. (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
  265. *ipv6_ext = 1;
  266. if (skb_is_gso(skb))
  267. rc |= XMIT_LSO;
  268. return rc;
  269. }
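/* The returned bits are consumed by qede_start_xmit() below: XMIT_PLAIN
 * means no checksum offload was requested, XMIT_L4_CSUM asks for L4
 * checksum insertion and may be OR'd with XMIT_LSO for GSO skbs;
 * *ipv6_ext flags the IPv6-with-extension-header case that needs the
 * extra BD parametrization.
 */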
  270. static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
  271. struct eth_tx_2nd_bd *second_bd,
  272. struct eth_tx_3rd_bd *third_bd)
  273. {
  274. u8 l4_proto;
  275. u16 bd2_bits1 = 0, bd2_bits2 = 0;
  276. bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
  277. bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
  278. ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
  279. << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
  280. bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
  281. ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
  282. if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
  283. l4_proto = ipv6_hdr(skb)->nexthdr;
  284. else
  285. l4_proto = ip_hdr(skb)->protocol;
  286. if (l4_proto == IPPROTO_UDP)
  287. bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
  288. if (third_bd)
  289. third_bd->data.bitfields |=
  290. cpu_to_le16(((tcp_hdrlen(skb) / 4) &
  291. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
  292. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
  293. second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
  294. second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
  295. }
  296. static int map_frag_to_bd(struct qede_dev *edev,
  297. skb_frag_t *frag,
  298. struct eth_tx_bd *bd)
  299. {
  300. dma_addr_t mapping;
  301. /* Map skb non-linear frag data for DMA */
  302. mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
  303. skb_frag_size(frag),
  304. DMA_TO_DEVICE);
  305. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  306. DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
  307. return -ENOMEM;
  308. }
  309. /* Setup the data pointer of the frag data */
  310. BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
  311. return 0;
  312. }
  313. /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
  314. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  315. static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
  316. u8 xmit_type)
  317. {
  318. int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
  319. if (xmit_type & XMIT_LSO) {
  320. int hlen;
  321. hlen = skb_transport_header(skb) +
  322. tcp_hdrlen(skb) - skb->data;
  323. /* linear payload would require its own BD */
  324. if (skb_headlen(skb) > hlen)
  325. allowed_frags--;
  326. }
  327. return (skb_shinfo(skb)->nr_frags > allowed_frags);
  328. }
  329. #endif
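/* Worked example (assuming ETH_TX_MAX_BDS_PER_NON_LSO_PACKET is 8, a value
 * not visible in this file): a non-LSO skb may use at most 7 frag BDs plus
 * the first BD for its linear data; an LSO skb whose linear part exceeds
 * the headers loses one more slot to the split-header BD. Anything beyond
 * that is linearized in qede_start_xmit() below.
 */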
  330. /* Main transmit function */
  331. static
  332. netdev_tx_t qede_start_xmit(struct sk_buff *skb,
  333. struct net_device *ndev)
  334. {
  335. struct qede_dev *edev = netdev_priv(ndev);
  336. struct netdev_queue *netdev_txq;
  337. struct qede_tx_queue *txq;
  338. struct eth_tx_1st_bd *first_bd;
  339. struct eth_tx_2nd_bd *second_bd = NULL;
  340. struct eth_tx_3rd_bd *third_bd = NULL;
  341. struct eth_tx_bd *tx_data_bd = NULL;
  342. u16 txq_index;
  343. u8 nbd = 0;
  344. dma_addr_t mapping;
  345. int rc, frag_idx = 0, ipv6_ext = 0;
  346. u8 xmit_type;
  347. u16 idx;
  348. u16 hlen;
  349. bool data_split = false;
  350. /* Get tx-queue context and netdev index */
  351. txq_index = skb_get_queue_mapping(skb);
  352. WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
  353. txq = QEDE_TX_QUEUE(edev, txq_index);
  354. netdev_txq = netdev_get_tx_queue(ndev, txq_index);
  355. WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
  356. (MAX_SKB_FRAGS + 1));
  357. xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
  358. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  359. if (qede_pkt_req_lin(edev, skb, xmit_type)) {
  360. if (skb_linearize(skb)) {
  361. DP_NOTICE(edev,
  362. "SKB linearization failed - silently dropping this SKB\n");
  363. dev_kfree_skb_any(skb);
  364. return NETDEV_TX_OK;
  365. }
  366. }
  367. #endif
  368. /* Fill the entry in the SW ring and the BDs in the FW ring */
  369. idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  370. txq->sw_tx_ring[idx].skb = skb;
  371. first_bd = (struct eth_tx_1st_bd *)
  372. qed_chain_produce(&txq->tx_pbl);
  373. memset(first_bd, 0, sizeof(*first_bd));
  374. first_bd->data.bd_flags.bitfields =
  375. 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
  376. /* Map skb linear data for DMA and set in the first BD */
  377. mapping = dma_map_single(&edev->pdev->dev, skb->data,
  378. skb_headlen(skb), DMA_TO_DEVICE);
  379. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  380. DP_NOTICE(edev, "SKB mapping failed\n");
  381. qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
  382. return NETDEV_TX_OK;
  383. }
  384. nbd++;
  385. BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
  386. /* In case there is IPv6 with extension headers or LSO we need 2nd and
  387. * 3rd BDs.
  388. */
  389. if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
  390. second_bd = (struct eth_tx_2nd_bd *)
  391. qed_chain_produce(&txq->tx_pbl);
  392. memset(second_bd, 0, sizeof(*second_bd));
  393. nbd++;
  394. third_bd = (struct eth_tx_3rd_bd *)
  395. qed_chain_produce(&txq->tx_pbl);
  396. memset(third_bd, 0, sizeof(*third_bd));
  397. nbd++;
  398. /* We need to fill in additional data in second_bd... */
  399. tx_data_bd = (struct eth_tx_bd *)second_bd;
  400. }
  401. if (skb_vlan_tag_present(skb)) {
  402. first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
  403. first_bd->data.bd_flags.bitfields |=
  404. 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
  405. }
  406. /* Fill the parsing flags & params according to the requested offload */
  407. if (xmit_type & XMIT_L4_CSUM) {
  408. u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
  409. /* We don't re-calculate IP checksum as it is already done by
  410. * the upper stack
  411. */
  412. first_bd->data.bd_flags.bitfields |=
  413. 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
  414. first_bd->data.bitfields |= cpu_to_le16(temp);
  415. /* If the packet is IPv6 with extension header, indicate that
  416. * to FW and pass a few params, since the device cracker doesn't
  417. * support parsing IPv6 with extension header/s.
  418. */
  419. if (unlikely(ipv6_ext))
  420. qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
  421. }
  422. if (xmit_type & XMIT_LSO) {
  423. first_bd->data.bd_flags.bitfields |=
  424. (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
  425. third_bd->data.lso_mss =
  426. cpu_to_le16(skb_shinfo(skb)->gso_size);
  427. first_bd->data.bd_flags.bitfields |=
  428. 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
  429. hlen = skb_transport_header(skb) +
  430. tcp_hdrlen(skb) - skb->data;
  431. /* @@@TBD - if will not be removed need to check */
  432. third_bd->data.bitfields |=
  433. cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
  434. /* Make life easier for FW guys who can't deal with header and
  435. * data on same BD. If we need to split, use the second bd...
  436. */
  437. if (unlikely(skb_headlen(skb) > hlen)) {
  438. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  439. "TSO split header size is %d (%x:%x)\n",
  440. first_bd->nbytes, first_bd->addr.hi,
  441. first_bd->addr.lo);
  442. mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
  443. le32_to_cpu(first_bd->addr.lo)) +
  444. hlen;
  445. BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
  446. le16_to_cpu(first_bd->nbytes) -
  447. hlen);
  448. /* this marks the BD as one that has no
  449. * individual mapping
  450. */
  451. txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
  452. first_bd->nbytes = cpu_to_le16(hlen);
  453. tx_data_bd = (struct eth_tx_bd *)third_bd;
  454. data_split = true;
  455. }
  456. }
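/* For a TSO header split, the first BD now covers only the hlen header
 * bytes, while the remainder of the linear data is described by the second
 * BD reusing the first BD's DMA mapping at offset hlen; QEDE_TSO_SPLIT_BD
 * marks the SW ring entry so qede_free_tx_pkt() later unmaps the linear
 * area once, using the combined length of both BDs.
 */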
  457. /* Handle fragmented skb */
  458. /* special handling for frags inside the 2nd and 3rd BDs */
  459. while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
  460. rc = map_frag_to_bd(edev,
  461. &skb_shinfo(skb)->frags[frag_idx],
  462. tx_data_bd);
  463. if (rc) {
  464. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  465. data_split);
  466. return NETDEV_TX_OK;
  467. }
  468. if (tx_data_bd == (struct eth_tx_bd *)second_bd)
  469. tx_data_bd = (struct eth_tx_bd *)third_bd;
  470. else
  471. tx_data_bd = NULL;
  472. frag_idx++;
  473. }
  474. /* map last frags into 4th, 5th .... */
  475. for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
  476. tx_data_bd = (struct eth_tx_bd *)
  477. qed_chain_produce(&txq->tx_pbl);
  478. memset(tx_data_bd, 0, sizeof(*tx_data_bd));
  479. rc = map_frag_to_bd(edev,
  480. &skb_shinfo(skb)->frags[frag_idx],
  481. tx_data_bd);
  482. if (rc) {
  483. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  484. data_split);
  485. return NETDEV_TX_OK;
  486. }
  487. }
  488. /* update the first BD with the actual num BDs */
  489. first_bd->data.nbds = nbd;
  490. netdev_tx_sent_queue(netdev_txq, skb->len);
  491. skb_tx_timestamp(skb);
  492. /* Advance packet producer only before sending the packet since mapping
  493. * of pages may fail.
  494. */
  495. txq->sw_tx_prod++;
  496. /* 'next page' entries are counted in the producer value */
  497. txq->tx_db.data.bd_prod =
  498. cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
  499. /* wmb makes sure that the BDs data is updated before updating the
  500. * producer, otherwise FW may read old data from the BDs.
  501. */
  502. wmb();
  503. barrier();
  504. writel(txq->tx_db.raw, txq->doorbell_addr);
  505. /* mmiowb is needed to synchronize doorbell writes from more than one
  506. * processor. It guarantees that the write arrives to the device before
  507. * the queue lock is released and another start_xmit is called (possibly
  508. * on another CPU). Without this barrier, the next doorbell can bypass
  509. * this doorbell. This is applicable to IA64/Altix systems.
  510. */
  511. mmiowb();
  512. if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
  513. < (MAX_SKB_FRAGS + 1))) {
  514. netif_tx_stop_queue(netdev_txq);
  515. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  516. "Stop queue was called\n");
  517. /* paired memory barrier is in qede_tx_int(), we have to keep
  518. * ordering of set_bit() in netif_tx_stop_queue() and read of
  519. * fp->bd_tx_cons
  520. */
  521. smp_mb();
  522. if (qed_chain_get_elem_left(&txq->tx_pbl)
  523. >= (MAX_SKB_FRAGS + 1) &&
  524. (edev->state == QEDE_STATE_OPEN)) {
  525. netif_tx_wake_queue(netdev_txq);
  526. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  527. "Wake queue was called\n");
  528. }
  529. }
  530. return NETDEV_TX_OK;
  531. }
  532. static int qede_txq_has_work(struct qede_tx_queue *txq)
  533. {
  534. u16 hw_bd_cons;
  535. /* Tell compiler that consumer and producer can change */
  536. barrier();
  537. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  538. if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
  539. return 0;
  540. return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
  541. }
  542. static int qede_tx_int(struct qede_dev *edev,
  543. struct qede_tx_queue *txq)
  544. {
  545. struct netdev_queue *netdev_txq;
  546. u16 hw_bd_cons;
  547. unsigned int pkts_compl = 0, bytes_compl = 0;
  548. int rc;
  549. netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
  550. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  551. barrier();
  552. while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
  553. int len = 0;
  554. rc = qede_free_tx_pkt(edev, txq, &len);
  555. if (rc) {
  556. DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
  557. hw_bd_cons,
  558. qed_chain_get_cons_idx(&txq->tx_pbl));
  559. break;
  560. }
  561. bytes_compl += len;
  562. pkts_compl++;
  563. txq->sw_tx_cons++;
  564. }
  565. netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
  566. /* Need to make the tx_bd_cons update visible to start_xmit()
  567. * before checking for netif_tx_queue_stopped(). Without the
  568. * memory barrier, there is a small possibility that
  569. * start_xmit() will miss it and cause the queue to be stopped
  570. * forever.
  571. * On the other hand we need an rmb() here to ensure the proper
  572. * ordering of bit testing in the following
  573. * netif_tx_queue_stopped(txq) call.
  574. */
  575. smp_mb();
  576. if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
  577. /* Taking tx_lock is needed to prevent reenabling the queue
  578. * while it's empty. This could have happened if rx_action() gets
  579. * suspended in qede_tx_int() after the condition before
  580. * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
  581. *
  582. * stops the queue->sees fresh tx_bd_cons->releases the queue->
  583. * sends some packets consuming the whole queue again->
  584. * stops the queue
  585. */
  586. __netif_tx_lock(netdev_txq, smp_processor_id());
  587. if ((netif_tx_queue_stopped(netdev_txq)) &&
  588. (edev->state == QEDE_STATE_OPEN) &&
  589. (qed_chain_get_elem_left(&txq->tx_pbl)
  590. >= (MAX_SKB_FRAGS + 1))) {
  591. netif_tx_wake_queue(netdev_txq);
  592. DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
  593. "Wake queue was called\n");
  594. }
  595. __netif_tx_unlock(netdev_txq);
  596. }
  597. return 0;
  598. }
  599. static bool qede_has_rx_work(struct qede_rx_queue *rxq)
  600. {
  601. u16 hw_comp_cons, sw_comp_cons;
  602. /* Tell compiler that status block fields can change */
  603. barrier();
  604. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  605. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  606. return hw_comp_cons != sw_comp_cons;
  607. }
  608. static bool qede_has_tx_work(struct qede_fastpath *fp)
  609. {
  610. u8 tc;
  611. for (tc = 0; tc < fp->edev->num_tc; tc++)
  612. if (qede_txq_has_work(&fp->txqs[tc]))
  613. return true;
  614. return false;
  615. }
  616. /* This function reuses the buffer (from an offset) from
  617. * consumer index to producer index in the bd ring
  618. */
  619. static inline void qede_reuse_page(struct qede_dev *edev,
  620. struct qede_rx_queue *rxq,
  621. struct sw_rx_data *curr_cons)
  622. {
  623. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  624. struct sw_rx_data *curr_prod;
  625. dma_addr_t new_mapping;
  626. curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  627. *curr_prod = *curr_cons;
  628. new_mapping = curr_prod->mapping + curr_prod->page_offset;
  629. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
  630. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
  631. rxq->sw_rx_prod++;
  632. curr_cons->data = NULL;
  633. }
  634. static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
  635. struct qede_rx_queue *rxq,
  636. struct sw_rx_data *curr_cons)
  637. {
  638. /* Move to the next segment in the page */
  639. curr_cons->page_offset += rxq->rx_buf_seg_size;
  640. if (curr_cons->page_offset == PAGE_SIZE) {
  641. if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
  642. return -ENOMEM;
  643. dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
  644. PAGE_SIZE, DMA_FROM_DEVICE);
  645. } else {
  646. /* Increment refcount of the page as we don't want
  647. * the network stack to take ownership of the page,
  648. * which can be recycled multiple times by the driver.
  649. */
  650. atomic_inc(&curr_cons->data->_count);
  651. qede_reuse_page(edev, rxq, curr_cons);
  652. }
  653. return 0;
  654. }
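/* RX buffer recycling scheme, as implemented above: each mapped page is
 * carved into rx_buf_seg_size segments. When a segment is consumed, the
 * same page is re-posted at the next offset (taking an extra page
 * reference so the stack cannot free it underneath the driver); only once
 * the whole page has been used is a replacement buffer allocated and the
 * old page unmapped.
 */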
  655. static inline void qede_update_rx_prod(struct qede_dev *edev,
  656. struct qede_rx_queue *rxq)
  657. {
  658. u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
  659. u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
  660. struct eth_rx_prod_data rx_prods = {0};
  661. /* Update producers */
  662. rx_prods.bd_prod = cpu_to_le16(bd_prod);
  663. rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
  664. /* Make sure that the BD and SGE data is updated before updating the
  665. * producers since FW might read the BD/SGE right after the producer
  666. * is updated.
  667. */
  668. wmb();
  669. internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
  670. (u32 *)&rx_prods);
  671. /* mmiowb is needed to synchronize doorbell writes from more than one
  672. * processor. It guarantees that the write arrives to the device before
  673. * the napi lock is released and another qede_poll is called (possibly
  674. * on another CPU). Without this barrier, the next doorbell can bypass
  675. * this doorbell. This is applicable to IA64/Altix systems.
  676. */
  677. mmiowb();
  678. }
  679. static u32 qede_get_rxhash(struct qede_dev *edev,
  680. u8 bitfields,
  681. __le32 rss_hash,
  682. enum pkt_hash_types *rxhash_type)
  683. {
  684. enum rss_hash_type htype;
  685. htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
  686. if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
  687. *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
  688. (htype == RSS_HASH_TYPE_IPV6)) ?
  689. PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
  690. return le32_to_cpu(rss_hash);
  691. }
  692. *rxhash_type = PKT_HASH_TYPE_NONE;
  693. return 0;
  694. }
  695. static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
  696. {
  697. skb_checksum_none_assert(skb);
  698. if (csum_flag & QEDE_CSUM_UNNECESSARY)
  699. skb->ip_summed = CHECKSUM_UNNECESSARY;
  700. }
  701. static inline void qede_skb_receive(struct qede_dev *edev,
  702. struct qede_fastpath *fp,
  703. struct sk_buff *skb,
  704. u16 vlan_tag)
  705. {
  706. if (vlan_tag)
  707. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  708. vlan_tag);
  709. napi_gro_receive(&fp->napi, skb);
  710. }
  711. static void qede_set_gro_params(struct qede_dev *edev,
  712. struct sk_buff *skb,
  713. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  714. {
  715. u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
  716. if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
  717. PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
  718. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  719. else
  720. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  721. skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
  722. cqe->header_len;
  723. }
  724. static int qede_fill_frag_skb(struct qede_dev *edev,
  725. struct qede_rx_queue *rxq,
  726. u8 tpa_agg_index,
  727. u16 len_on_bd)
  728. {
  729. struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
  730. NUM_RX_BDS_MAX];
  731. struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
  732. struct sk_buff *skb = tpa_info->skb;
  733. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  734. goto out;
  735. /* Add one frag and update the appropriate fields in the skb */
  736. skb_fill_page_desc(skb, tpa_info->frag_id++,
  737. current_bd->data, current_bd->page_offset,
  738. len_on_bd);
  739. if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
  740. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  741. goto out;
  742. }
  743. qed_chain_consume(&rxq->rx_bd_ring);
  744. rxq->sw_rx_cons++;
  745. skb->data_len += len_on_bd;
  746. skb->truesize += rxq->rx_buf_seg_size;
  747. skb->len += len_on_bd;
  748. return 0;
  749. out:
  750. return -ENOMEM;
  751. }
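/* HW TPA (LRO-like aggregation) lifecycle handled by the helpers below:
 * a TPA_START CQE stashes the first buffer (still mapped) and posts the
 * pre-allocated replacement buffer in its place while allocating an skb
 * for the aggregation; TPA_CONT CQEs append further buffers as page frags
 * via qede_fill_frag_skb(); TPA_END copies the first-BD data from the
 * stashed buffer into the skb's linear area, runs sanity checks and hands
 * the skb to qede_gro_receive().
 */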
  752. static void qede_tpa_start(struct qede_dev *edev,
  753. struct qede_rx_queue *rxq,
  754. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  755. {
  756. struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  757. struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
  758. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  759. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  760. dma_addr_t mapping = tpa_info->replace_buf_mapping;
  761. struct sw_rx_data *sw_rx_data_cons;
  762. struct sw_rx_data *sw_rx_data_prod;
  763. enum pkt_hash_types rxhash_type;
  764. u32 rxhash;
  765. sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
  766. sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  767. /* Use pre-allocated replacement buffer - we can't release the agg.
  768. * start until it's over and we don't want to risk allocation failing
  769. * here, so re-allocate when the aggregation is over.
  770. */
  771. dma_unmap_addr_set(sw_rx_data_prod, mapping,
  772. dma_unmap_addr(replace_buf, mapping));
  773. sw_rx_data_prod->data = replace_buf->data;
  774. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  775. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  776. sw_rx_data_prod->page_offset = replace_buf->page_offset;
  777. rxq->sw_rx_prod++;
  778. /* move partial skb from cons to pool (don't unmap yet)
  779. * save mapping, in case we drop the packet later on.
  780. */
  781. tpa_info->start_buf = *sw_rx_data_cons;
  782. mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
  783. le32_to_cpu(rx_bd_cons->addr.lo));
  784. tpa_info->start_buf_mapping = mapping;
  785. rxq->sw_rx_cons++;
  786. /* set tpa state to start only if we are able to allocate skb
  787. * for this aggregation, otherwise mark as error and aggregation will
  788. * be dropped
  789. */
  790. tpa_info->skb = netdev_alloc_skb(edev->ndev,
  791. le16_to_cpu(cqe->len_on_first_bd));
  792. if (unlikely(!tpa_info->skb)) {
  793. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  794. return;
  795. }
  796. skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
  797. memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
  798. /* Start filling in the aggregation info */
  799. tpa_info->frag_id = 0;
  800. tpa_info->agg_state = QEDE_AGG_STATE_START;
  801. rxhash = qede_get_rxhash(edev, cqe->bitfields,
  802. cqe->rss_hash, &rxhash_type);
  803. skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
  804. if ((le16_to_cpu(cqe->pars_flags.flags) >>
  805. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
  806. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
  807. tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
  808. else
  809. tpa_info->vlan_tag = 0;
  810. /* This is needed in order to enable forwarding support */
  811. qede_set_gro_params(edev, tpa_info->skb, cqe);
  812. if (likely(cqe->ext_bd_len_list[0]))
  813. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  814. le16_to_cpu(cqe->ext_bd_len_list[0]));
  815. if (unlikely(cqe->ext_bd_len_list[1])) {
  816. DP_ERR(edev,
  817. "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
  818. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  819. }
  820. }
  821. #ifdef CONFIG_INET
  822. static void qede_gro_ip_csum(struct sk_buff *skb)
  823. {
  824. const struct iphdr *iph = ip_hdr(skb);
  825. struct tcphdr *th;
  826. skb_set_network_header(skb, 0);
  827. skb_set_transport_header(skb, sizeof(struct iphdr));
  828. th = tcp_hdr(skb);
  829. th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
  830. iph->saddr, iph->daddr, 0);
  831. tcp_gro_complete(skb);
  832. }
  833. static void qede_gro_ipv6_csum(struct sk_buff *skb)
  834. {
  835. struct ipv6hdr *iph = ipv6_hdr(skb);
  836. struct tcphdr *th;
  837. skb_set_network_header(skb, 0);
  838. skb_set_transport_header(skb, sizeof(struct ipv6hdr));
  839. th = tcp_hdr(skb);
  840. th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
  841. &iph->saddr, &iph->daddr, 0);
  842. tcp_gro_complete(skb);
  843. }
  844. #endif
  845. static void qede_gro_receive(struct qede_dev *edev,
  846. struct qede_fastpath *fp,
  847. struct sk_buff *skb,
  848. u16 vlan_tag)
  849. {
  850. #ifdef CONFIG_INET
  851. if (skb_shinfo(skb)->gso_size) {
  852. switch (skb->protocol) {
  853. case htons(ETH_P_IP):
  854. qede_gro_ip_csum(skb);
  855. break;
  856. case htons(ETH_P_IPV6):
  857. qede_gro_ipv6_csum(skb);
  858. break;
  859. default:
  860. DP_ERR(edev,
  861. "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
  862. ntohs(skb->protocol));
  863. }
  864. }
  865. #endif
  866. skb_record_rx_queue(skb, fp->rss_id);
  867. qede_skb_receive(edev, fp, skb, vlan_tag);
  868. }
  869. static inline void qede_tpa_cont(struct qede_dev *edev,
  870. struct qede_rx_queue *rxq,
  871. struct eth_fast_path_rx_tpa_cont_cqe *cqe)
  872. {
  873. int i;
  874. for (i = 0; cqe->len_list[i]; i++)
  875. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  876. le16_to_cpu(cqe->len_list[i]));
  877. if (unlikely(i > 1))
  878. DP_ERR(edev,
  879. "Strange - TPA cont with more than a single len_list entry\n");
  880. }
  881. static void qede_tpa_end(struct qede_dev *edev,
  882. struct qede_fastpath *fp,
  883. struct eth_fast_path_rx_tpa_end_cqe *cqe)
  884. {
  885. struct qede_rx_queue *rxq = fp->rxq;
  886. struct qede_agg_info *tpa_info;
  887. struct sk_buff *skb;
  888. int i;
  889. tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  890. skb = tpa_info->skb;
  891. for (i = 0; cqe->len_list[i]; i++)
  892. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  893. le16_to_cpu(cqe->len_list[i]));
  894. if (unlikely(i > 1))
  895. DP_ERR(edev,
  896. "Strange - TPA emd with more than a single len_list entry\n");
  897. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  898. goto err;
  899. /* Sanity */
  900. if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
  901. DP_ERR(edev,
  902. "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
  903. cqe->num_of_bds, tpa_info->frag_id);
  904. if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
  905. DP_ERR(edev,
  906. "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
  907. le16_to_cpu(cqe->total_packet_len), skb->len);
  908. memcpy(skb->data,
  909. page_address(tpa_info->start_buf.data) +
  910. tpa_info->start_cqe.placement_offset +
  911. tpa_info->start_buf.page_offset,
  912. le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
  913. /* Recycle [mapped] start buffer for the next replacement */
  914. tpa_info->replace_buf = tpa_info->start_buf;
  915. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  916. /* Finalize the SKB */
  917. skb->protocol = eth_type_trans(skb, edev->ndev);
  918. skb->ip_summed = CHECKSUM_UNNECESSARY;
  919. /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
  920. * to skb_shinfo(skb)->gso_segs
  921. */
  922. NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
  923. qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
  924. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  925. return;
  926. err:
  927. /* The BD starting the aggregation is still mapped; Re-use it for
  928. * future aggregations [as replacement buffer]
  929. */
  930. memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
  931. sizeof(struct sw_rx_data));
  932. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  933. tpa_info->start_buf.data = NULL;
  934. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  935. dev_kfree_skb_any(tpa_info->skb);
  936. tpa_info->skb = NULL;
  937. }
  938. static u8 qede_check_csum(u16 flag)
  939. {
  940. u16 csum_flag = 0;
  941. u8 csum = 0;
  942. if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
  943. PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
  944. csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
  945. PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
  946. csum = QEDE_CSUM_UNNECESSARY;
  947. }
  948. csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
  949. PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
  950. if (csum_flag & flag)
  951. return QEDE_CSUM_ERROR;
  952. return csum;
  953. }
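/* Summary: returns QEDE_CSUM_ERROR if the CQE reports an IP header error
 * or (for a calculated L4 checksum) an L4 checksum error,
 * QEDE_CSUM_UNNECESSARY when the hardware validated the L4 checksum, and 0
 * when no L4 checksum was calculated (qede_set_skb_csum() then leaves the
 * skb at CHECKSUM_NONE).
 */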
  954. static int qede_rx_int(struct qede_fastpath *fp, int budget)
  955. {
  956. struct qede_dev *edev = fp->edev;
  957. struct qede_rx_queue *rxq = fp->rxq;
  958. u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
  959. int rx_pkt = 0;
  960. u8 csum_flag;
  961. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  962. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  963. /* Memory barrier to prevent the CPU from doing speculative reads of CQE
  964. * / BD in the while-loop before reading hw_comp_cons. If the CQE is
  965. * speculatively read before FW has written it, and hw_comp_cons is read
  966. * only after the FW update, the CPU would process a stale CQE.
  967. */
  968. rmb();
  969. /* Loop to complete all indicated BDs */
  970. while (sw_comp_cons != hw_comp_cons) {
  971. struct eth_fast_path_rx_reg_cqe *fp_cqe;
  972. enum pkt_hash_types rxhash_type;
  973. enum eth_rx_cqe_type cqe_type;
  974. struct sw_rx_data *sw_rx_data;
  975. union eth_rx_cqe *cqe;
  976. struct sk_buff *skb;
  977. struct page *data;
  978. __le16 flags;
  979. u16 len, pad;
  980. u32 rx_hash;
  981. /* Get the CQE from the completion ring */
  982. cqe = (union eth_rx_cqe *)
  983. qed_chain_consume(&rxq->rx_comp_ring);
  984. cqe_type = cqe->fast_path_regular.type;
  985. if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
  986. edev->ops->eth_cqe_completion(
  987. edev->cdev, fp->rss_id,
  988. (struct eth_slow_path_rx_cqe *)cqe);
  989. goto next_cqe;
  990. }
  991. if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
  992. switch (cqe_type) {
  993. case ETH_RX_CQE_TYPE_TPA_START:
  994. qede_tpa_start(edev, rxq,
  995. &cqe->fast_path_tpa_start);
  996. goto next_cqe;
  997. case ETH_RX_CQE_TYPE_TPA_CONT:
  998. qede_tpa_cont(edev, rxq,
  999. &cqe->fast_path_tpa_cont);
  1000. goto next_cqe;
  1001. case ETH_RX_CQE_TYPE_TPA_END:
  1002. qede_tpa_end(edev, fp,
  1003. &cqe->fast_path_tpa_end);
  1004. goto next_rx_only;
  1005. default:
  1006. break;
  1007. }
  1008. }
  1009. /* Get the data from the SW ring */
  1010. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1011. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1012. data = sw_rx_data->data;
  1013. fp_cqe = &cqe->fast_path_regular;
  1014. len = le16_to_cpu(fp_cqe->len_on_first_bd);
  1015. pad = fp_cqe->placement_offset;
  1016. flags = cqe->fast_path_regular.pars_flags.flags;
  1017. /* If this is an error packet then drop it */
  1018. parse_flag = le16_to_cpu(flags);
  1019. csum_flag = qede_check_csum(parse_flag);
  1020. if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
  1021. DP_NOTICE(edev,
  1022. "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
  1023. sw_comp_cons, parse_flag);
  1024. rxq->rx_hw_errors++;
  1025. qede_reuse_page(edev, rxq, sw_rx_data);
  1026. goto next_rx;
  1027. }
  1028. skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
  1029. if (unlikely(!skb)) {
  1030. DP_NOTICE(edev,
  1031. "Build_skb failed, dropping incoming packet\n");
  1032. qede_reuse_page(edev, rxq, sw_rx_data);
  1033. rxq->rx_alloc_errors++;
  1034. goto next_rx;
  1035. }
  1036. /* Copy data into SKB */
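/* Small packets (up to QEDE_RX_HDR_SIZE including the placement pad) are
 * copied into the skb's linear area and the page segment is reused
 * immediately; larger packets attach the page as a frag and only the
 * headers, sized via eth_get_headlen(), are pulled into the linear area.
 */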
  1037. if (len + pad <= QEDE_RX_HDR_SIZE) {
  1038. memcpy(skb_put(skb, len),
  1039. page_address(data) + pad +
  1040. sw_rx_data->page_offset, len);
  1041. qede_reuse_page(edev, rxq, sw_rx_data);
  1042. } else {
  1043. struct skb_frag_struct *frag;
  1044. unsigned int pull_len;
  1045. unsigned char *va;
  1046. frag = &skb_shinfo(skb)->frags[0];
  1047. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
  1048. pad + sw_rx_data->page_offset,
  1049. len, rxq->rx_buf_seg_size);
  1050. va = skb_frag_address(frag);
  1051. pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
  1052. /* Align the pull_len to optimize memcpy */
  1053. memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
  1054. skb_frag_size_sub(frag, pull_len);
  1055. frag->page_offset += pull_len;
  1056. skb->data_len -= pull_len;
  1057. skb->tail += pull_len;
  1058. if (unlikely(qede_realloc_rx_buffer(edev, rxq,
  1059. sw_rx_data))) {
  1060. DP_ERR(edev, "Failed to allocate rx buffer\n");
  1061. rxq->rx_alloc_errors++;
  1062. goto next_cqe;
  1063. }
  1064. }
  1065. if (fp_cqe->bd_num != 1) {
  1066. u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
  1067. u8 num_frags;
  1068. pkt_len -= len;
  1069. for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
  1070. num_frags--) {
  1071. u16 cur_size = pkt_len > rxq->rx_buf_size ?
  1072. rxq->rx_buf_size : pkt_len;
  1073. WARN_ONCE(!cur_size,
  1074. "Still got %d BDs for mapping jumbo, but length became 0\n",
  1075. num_frags);
  1076. if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
  1077. goto next_cqe;
  1078. rxq->sw_rx_cons++;
  1079. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1080. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1081. qed_chain_consume(&rxq->rx_bd_ring);
  1082. dma_unmap_page(&edev->pdev->dev,
  1083. sw_rx_data->mapping,
  1084. PAGE_SIZE, DMA_FROM_DEVICE);
  1085. skb_fill_page_desc(skb,
  1086. skb_shinfo(skb)->nr_frags++,
  1087. sw_rx_data->data, 0,
  1088. cur_size);
  1089. skb->truesize += PAGE_SIZE;
  1090. skb->data_len += cur_size;
  1091. skb->len += cur_size;
  1092. pkt_len -= cur_size;
  1093. }
  1094. if (pkt_len)
  1095. DP_ERR(edev,
  1096. "Mapped all BDs of jumbo, but still have %d bytes\n",
  1097. pkt_len);
  1098. }
  1099. skb->protocol = eth_type_trans(skb, edev->ndev);
  1100. rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
  1101. fp_cqe->rss_hash,
  1102. &rxhash_type);
  1103. skb_set_hash(skb, rx_hash, rxhash_type);
  1104. qede_set_skb_csum(skb, csum_flag);
  1105. skb_record_rx_queue(skb, fp->rss_id);
  1106. qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
  1107. qed_chain_consume(&rxq->rx_bd_ring);
  1108. next_rx:
  1109. rxq->sw_rx_cons++;
  1110. next_rx_only:
  1111. rx_pkt++;
  1112. next_cqe: /* don't consume bd rx buffer */
  1113. qed_chain_recycle_consumed(&rxq->rx_comp_ring);
  1114. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  1115. /* CR TPA - revisit how to handle budget in TPA perhaps
  1116. * increase on "end"
  1117. */
  1118. if (rx_pkt == budget)
  1119. break;
  1120. } /* repeat while sw_comp_cons != hw_comp_cons... */
  1121. /* Update producers */
  1122. qede_update_rx_prod(edev, rxq);
  1123. return rx_pkt;
  1124. }
  1125. static int qede_poll(struct napi_struct *napi, int budget)
  1126. {
  1127. int work_done = 0;
  1128. struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
  1129. napi);
  1130. struct qede_dev *edev = fp->edev;
  1131. while (1) {
  1132. u8 tc;
  1133. for (tc = 0; tc < edev->num_tc; tc++)
  1134. if (qede_txq_has_work(&fp->txqs[tc]))
  1135. qede_tx_int(edev, &fp->txqs[tc]);
  1136. if (qede_has_rx_work(fp->rxq)) {
  1137. work_done += qede_rx_int(fp, budget - work_done);
  1138. /* must not complete if we consumed full budget */
  1139. if (work_done >= budget)
  1140. break;
  1141. }
  1142. /* Fall out from the NAPI loop if needed */
  1143. if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
  1144. qed_sb_update_sb_idx(fp->sb_info);
			/* *_has_*_work() reads the status block, so we need
			 * to ensure the status block indices have actually
			 * been read (qed_sb_update_sb_idx) before this check
			 * (*_has_*_work); otherwise we might ack a "newer"
			 * status block value to HW than the one the check
			 * saw. If a DMA arrived right after qede_has_rx_work
			 * and there were no rmb, the read in
			 * qed_sb_update_sb_idx could be postponed until just
			 * before *_ack_sb; no further interrupt would then
			 * arrive until the status block is updated again,
			 * even though there is still unhandled work.
			 */
  1158. rmb();
  1159. if (!(qede_has_rx_work(fp->rxq) ||
  1160. qede_has_tx_work(fp))) {
  1161. napi_complete(napi);
  1162. /* Update and reenable interrupts */
  1163. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
  1164. 1 /*update*/);
  1165. break;
  1166. }
  1167. }
  1168. }
  1169. return work_done;
  1170. }
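/* MSI-X fast-path ISR: interrupts for this status block are masked via
 * qed_sb_ack(IGU_INT_DISABLE) and the NAPI context is scheduled instead;
 * qede_poll() re-enables them with qed_sb_ack(IGU_INT_ENABLE) once all
 * Rx/Tx work has been handled.
 */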
  1171. static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
  1172. {
  1173. struct qede_fastpath *fp = fp_cookie;
  1174. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
  1175. napi_schedule_irqoff(&fp->napi);
  1176. return IRQ_HANDLED;
  1177. }
  1178. /* -------------------------------------------------------------------------
  1179. * END OF FAST-PATH
  1180. * -------------------------------------------------------------------------
  1181. */
  1182. static int qede_open(struct net_device *ndev);
  1183. static int qede_close(struct net_device *ndev);
  1184. static int qede_set_mac_addr(struct net_device *ndev, void *p);
  1185. static void qede_set_rx_mode(struct net_device *ndev);
  1186. static void qede_config_rx_mode(struct net_device *ndev);
  1187. static int qede_set_ucast_rx_mac(struct qede_dev *edev,
  1188. enum qed_filter_xcast_params_type opcode,
  1189. unsigned char mac[ETH_ALEN])
  1190. {
  1191. struct qed_filter_params filter_cmd;
  1192. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1193. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1194. filter_cmd.filter.ucast.type = opcode;
  1195. filter_cmd.filter.ucast.mac_valid = 1;
  1196. ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
  1197. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1198. }
  1199. static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
  1200. enum qed_filter_xcast_params_type opcode,
  1201. u16 vid)
  1202. {
  1203. struct qed_filter_params filter_cmd;
  1204. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1205. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1206. filter_cmd.filter.ucast.type = opcode;
  1207. filter_cmd.filter.ucast.vlan_valid = 1;
  1208. filter_cmd.filter.ucast.vlan = vid;
  1209. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1210. }
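/* Pull a fresh snapshot of the vport statistics from qed and cache it in
 * edev->stats. Called on demand - e.g. by qede_get_stats64() below; since
 * the function is not static, it is presumably also used by the ethtool code.
 */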
  1211. void qede_fill_by_demand_stats(struct qede_dev *edev)
  1212. {
  1213. struct qed_eth_stats stats;
  1214. edev->ops->get_vport_stats(edev->cdev, &stats);
  1215. edev->stats.no_buff_discards = stats.no_buff_discards;
  1216. edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
  1217. edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
  1218. edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
  1219. edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
  1220. edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
  1221. edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
  1222. edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
  1223. edev->stats.mac_filter_discards = stats.mac_filter_discards;
  1224. edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
  1225. edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
  1226. edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
  1227. edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
  1228. edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
  1229. edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
  1230. edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
  1231. edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
  1232. edev->stats.coalesced_events = stats.tpa_coalesced_events;
  1233. edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
  1234. edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
  1235. edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
  1236. edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
  1237. edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
  1238. edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
  1239. edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
  1240. edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
  1241. edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
  1242. edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
  1243. edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
  1244. edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
  1245. edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
  1246. edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
  1247. edev->stats.rx_crc_errors = stats.rx_crc_errors;
  1248. edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
  1249. edev->stats.rx_pause_frames = stats.rx_pause_frames;
  1250. edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
  1251. edev->stats.rx_align_errors = stats.rx_align_errors;
  1252. edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
  1253. edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
  1254. edev->stats.rx_jabbers = stats.rx_jabbers;
  1255. edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
  1256. edev->stats.rx_fragments = stats.rx_fragments;
  1257. edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
  1258. edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
  1259. edev->stats.tx_128_to_255_byte_packets =
  1260. stats.tx_128_to_255_byte_packets;
  1261. edev->stats.tx_256_to_511_byte_packets =
  1262. stats.tx_256_to_511_byte_packets;
  1263. edev->stats.tx_512_to_1023_byte_packets =
  1264. stats.tx_512_to_1023_byte_packets;
  1265. edev->stats.tx_1024_to_1518_byte_packets =
  1266. stats.tx_1024_to_1518_byte_packets;
  1267. edev->stats.tx_1519_to_2047_byte_packets =
  1268. stats.tx_1519_to_2047_byte_packets;
  1269. edev->stats.tx_2048_to_4095_byte_packets =
  1270. stats.tx_2048_to_4095_byte_packets;
  1271. edev->stats.tx_4096_to_9216_byte_packets =
  1272. stats.tx_4096_to_9216_byte_packets;
  1273. edev->stats.tx_9217_to_16383_byte_packets =
  1274. stats.tx_9217_to_16383_byte_packets;
  1275. edev->stats.tx_pause_frames = stats.tx_pause_frames;
  1276. edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
  1277. edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
  1278. edev->stats.tx_total_collisions = stats.tx_total_collisions;
  1279. edev->stats.brb_truncates = stats.brb_truncates;
  1280. edev->stats.brb_discards = stats.brb_discards;
  1281. edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
  1282. }
  1283. static struct rtnl_link_stats64 *qede_get_stats64(
  1284. struct net_device *dev,
  1285. struct rtnl_link_stats64 *stats)
  1286. {
  1287. struct qede_dev *edev = netdev_priv(dev);
  1288. qede_fill_by_demand_stats(edev);
  1289. stats->rx_packets = edev->stats.rx_ucast_pkts +
  1290. edev->stats.rx_mcast_pkts +
  1291. edev->stats.rx_bcast_pkts;
  1292. stats->tx_packets = edev->stats.tx_ucast_pkts +
  1293. edev->stats.tx_mcast_pkts +
  1294. edev->stats.tx_bcast_pkts;
  1295. stats->rx_bytes = edev->stats.rx_ucast_bytes +
  1296. edev->stats.rx_mcast_bytes +
  1297. edev->stats.rx_bcast_bytes;
  1298. stats->tx_bytes = edev->stats.tx_ucast_bytes +
  1299. edev->stats.tx_mcast_bytes +
  1300. edev->stats.tx_bcast_bytes;
  1301. stats->tx_errors = edev->stats.tx_err_drop_pkts;
  1302. stats->multicast = edev->stats.rx_mcast_pkts +
  1303. edev->stats.rx_bcast_pkts;
  1304. stats->rx_fifo_errors = edev->stats.no_buff_discards;
  1305. stats->collisions = edev->stats.tx_total_collisions;
  1306. stats->rx_crc_errors = edev->stats.rx_crc_errors;
  1307. stats->rx_frame_errors = edev->stats.rx_align_errors;
  1308. return stats;
  1309. }
  1310. static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
  1311. {
  1312. struct qed_update_vport_params params;
  1313. int rc;
  1314. /* Proceed only if action actually needs to be performed */
  1315. if (edev->accept_any_vlan == action)
  1316. return;
  1317. memset(&params, 0, sizeof(params));
  1318. params.vport_id = 0;
  1319. params.accept_any_vlan = action;
  1320. params.update_accept_any_vlan_flg = 1;
  1321. rc = edev->ops->vport_update(edev->cdev, &params);
  1322. if (rc) {
  1323. DP_ERR(edev, "Failed to %s accept-any-vlan\n",
  1324. action ? "enable" : "disable");
  1325. } else {
  1326. DP_INFO(edev, "%s accept-any-vlan\n",
  1327. action ? "enabled" : "disabled");
  1328. edev->accept_any_vlan = action;
  1329. }
  1330. }
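/* VLAN filtering scheme used below: the device exposes a limited number of
 * HW VLAN filters (edev->dev_info.num_vlan_filters), with vlan0 having its
 * own reserved filter. VLANs added while the interface is down, or once the
 * quota is exhausted, are kept in edev->vlan_list as "non-configured", and
 * the vport falls back to accept-any-VLAN mode until enough credits free up.
 */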
  1331. static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1332. {
  1333. struct qede_dev *edev = netdev_priv(dev);
  1334. struct qede_vlan *vlan, *tmp;
  1335. int rc;
  1336. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
  1337. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1338. if (!vlan) {
  1339. DP_INFO(edev, "Failed to allocate struct for vlan\n");
  1340. return -ENOMEM;
  1341. }
  1342. INIT_LIST_HEAD(&vlan->list);
  1343. vlan->vid = vid;
  1344. vlan->configured = false;
  1345. /* Verify vlan isn't already configured */
  1346. list_for_each_entry(tmp, &edev->vlan_list, list) {
  1347. if (tmp->vid == vlan->vid) {
  1348. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1349. "vlan already configured\n");
  1350. kfree(vlan);
  1351. return -EEXIST;
  1352. }
  1353. }
  1354. /* If interface is down, cache this VLAN ID and return */
  1355. if (edev->state != QEDE_STATE_OPEN) {
  1356. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1357. "Interface is down, VLAN %d will be configured when interface is up\n",
  1358. vid);
  1359. if (vid != 0)
  1360. edev->non_configured_vlans++;
  1361. list_add(&vlan->list, &edev->vlan_list);
  1362. return 0;
  1363. }
  1364. /* Check for the filter limit.
  1365. * Note - vlan0 has a reserved filter and can be added without
  1366. * worrying about quota
  1367. */
  1368. if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
  1369. (vlan->vid == 0)) {
  1370. rc = qede_set_ucast_rx_vlan(edev,
  1371. QED_FILTER_XCAST_TYPE_ADD,
  1372. vlan->vid);
  1373. if (rc) {
  1374. DP_ERR(edev, "Failed to configure VLAN %d\n",
  1375. vlan->vid);
  1376. kfree(vlan);
  1377. return -EINVAL;
  1378. }
  1379. vlan->configured = true;
  1380. /* vlan0 filter isn't consuming out of our quota */
  1381. if (vlan->vid != 0)
  1382. edev->configured_vlans++;
  1383. } else {
  1384. /* Out of quota; Activate accept-any-VLAN mode */
  1385. if (!edev->non_configured_vlans)
  1386. qede_config_accept_any_vlan(edev, true);
  1387. edev->non_configured_vlans++;
  1388. }
  1389. list_add(&vlan->list, &edev->vlan_list);
  1390. return 0;
  1391. }
  1392. static void qede_del_vlan_from_list(struct qede_dev *edev,
  1393. struct qede_vlan *vlan)
  1394. {
  1395. /* vlan0 filter isn't consuming out of our quota */
  1396. if (vlan->vid != 0) {
  1397. if (vlan->configured)
  1398. edev->configured_vlans--;
  1399. else
  1400. edev->non_configured_vlans--;
  1401. }
  1402. list_del(&vlan->list);
  1403. kfree(vlan);
  1404. }
  1405. static int qede_configure_vlan_filters(struct qede_dev *edev)
  1406. {
  1407. int rc = 0, real_rc = 0, accept_any_vlan = 0;
  1408. struct qed_dev_eth_info *dev_info;
  1409. struct qede_vlan *vlan = NULL;
  1410. if (list_empty(&edev->vlan_list))
  1411. return 0;
  1412. dev_info = &edev->dev_info;
  1413. /* Configure non-configured vlans */
  1414. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1415. if (vlan->configured)
  1416. continue;
  1417. /* We have used all our credits, now enable accept_any_vlan */
  1418. if ((vlan->vid != 0) &&
  1419. (edev->configured_vlans == dev_info->num_vlan_filters)) {
  1420. accept_any_vlan = 1;
  1421. continue;
  1422. }
  1423. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
  1424. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
  1425. vlan->vid);
  1426. if (rc) {
  1427. DP_ERR(edev, "Failed to configure VLAN %u\n",
  1428. vlan->vid);
  1429. real_rc = rc;
  1430. continue;
  1431. }
  1432. vlan->configured = true;
  1433. /* vlan0 filter doesn't consume our VLAN filter's quota */
  1434. if (vlan->vid != 0) {
  1435. edev->non_configured_vlans--;
  1436. edev->configured_vlans++;
  1437. }
  1438. }
  1439. /* enable accept_any_vlan mode if we have more VLANs than credits,
  1440. * or remove accept_any_vlan mode if we've actually removed
  1441. * a non-configured vlan, and all remaining vlans are truly configured.
  1442. */
  1443. if (accept_any_vlan)
  1444. qede_config_accept_any_vlan(edev, true);
  1445. else if (!edev->non_configured_vlans)
  1446. qede_config_accept_any_vlan(edev, false);
  1447. return real_rc;
  1448. }
  1449. static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
  1450. {
  1451. struct qede_dev *edev = netdev_priv(dev);
  1452. struct qede_vlan *vlan = NULL;
  1453. int rc;
  1454. DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
  1455. /* Find whether entry exists */
  1456. list_for_each_entry(vlan, &edev->vlan_list, list)
  1457. if (vlan->vid == vid)
  1458. break;
	if (&vlan->list == &edev->vlan_list) {
  1460. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1461. "Vlan isn't configured\n");
  1462. return 0;
  1463. }
  1464. if (edev->state != QEDE_STATE_OPEN) {
		/* As the interface is already down, there is no vport
		 * instance from which to remove the vlan filter; just
		 * update the vlan list.
		 */
  1468. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1469. "Interface is down, removing VLAN from list only\n");
  1470. qede_del_vlan_from_list(edev, vlan);
  1471. return 0;
  1472. }
  1473. /* Remove vlan */
  1474. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
  1475. if (rc) {
  1476. DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
  1477. return -EINVAL;
  1478. }
  1479. qede_del_vlan_from_list(edev, vlan);
  1480. /* We have removed a VLAN - try to see if we can
  1481. * configure non-configured VLAN from the list.
  1482. */
  1483. rc = qede_configure_vlan_filters(edev);
  1484. return rc;
  1485. }
  1486. static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
  1487. {
  1488. struct qede_vlan *vlan = NULL;
  1489. if (list_empty(&edev->vlan_list))
  1490. return;
  1491. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1492. if (!vlan->configured)
  1493. continue;
  1494. vlan->configured = false;
  1495. /* vlan0 filter isn't consuming out of our quota */
  1496. if (vlan->vid != 0) {
  1497. edev->non_configured_vlans++;
  1498. edev->configured_vlans--;
  1499. }
  1500. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1501. "marked vlan %d as non-configured\n",
  1502. vlan->vid);
  1503. }
  1504. edev->accept_any_vlan = false;
  1505. }
  1506. static const struct net_device_ops qede_netdev_ops = {
  1507. .ndo_open = qede_open,
  1508. .ndo_stop = qede_close,
  1509. .ndo_start_xmit = qede_start_xmit,
  1510. .ndo_set_rx_mode = qede_set_rx_mode,
  1511. .ndo_set_mac_address = qede_set_mac_addr,
  1512. .ndo_validate_addr = eth_validate_addr,
  1513. .ndo_change_mtu = qede_change_mtu,
  1514. .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
  1515. .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
  1516. .ndo_get_stats64 = qede_get_stats64,
  1517. };
  1518. /* -------------------------------------------------------------------------
  1519. * START OF PROBE / REMOVE
  1520. * -------------------------------------------------------------------------
  1521. */
  1522. static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
  1523. struct pci_dev *pdev,
  1524. struct qed_dev_eth_info *info,
  1525. u32 dp_module,
  1526. u8 dp_level)
  1527. {
  1528. struct net_device *ndev;
  1529. struct qede_dev *edev;
  1530. ndev = alloc_etherdev_mqs(sizeof(*edev),
  1531. info->num_queues,
  1532. info->num_queues);
  1533. if (!ndev) {
  1534. pr_err("etherdev allocation failed\n");
  1535. return NULL;
  1536. }
  1537. edev = netdev_priv(ndev);
  1538. edev->ndev = ndev;
  1539. edev->cdev = cdev;
  1540. edev->pdev = pdev;
  1541. edev->dp_module = dp_module;
  1542. edev->dp_level = dp_level;
  1543. edev->ops = qed_ops;
  1544. edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
  1545. edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);
  1547. SET_NETDEV_DEV(ndev, &pdev->dev);
  1548. memset(&edev->stats, 0, sizeof(edev->stats));
  1549. memcpy(&edev->dev_info, info, sizeof(*info));
  1550. edev->num_tc = edev->dev_info.num_tc;
  1551. INIT_LIST_HEAD(&edev->vlan_list);
  1552. return edev;
  1553. }
  1554. static void qede_init_ndev(struct qede_dev *edev)
  1555. {
  1556. struct net_device *ndev = edev->ndev;
  1557. struct pci_dev *pdev = edev->pdev;
  1558. u32 hw_features;
  1559. pci_set_drvdata(pdev, ndev);
  1560. ndev->mem_start = edev->dev_info.common.pci_mem_start;
  1561. ndev->base_addr = ndev->mem_start;
  1562. ndev->mem_end = edev->dev_info.common.pci_mem_end;
  1563. ndev->irq = edev->dev_info.common.pci_irq;
  1564. ndev->watchdog_timeo = TX_TIMEOUT;
  1565. ndev->netdev_ops = &qede_netdev_ops;
  1566. qede_set_ethtool_ops(ndev);
	/* user-changeable features */
  1568. hw_features = NETIF_F_GRO | NETIF_F_SG |
  1569. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1570. NETIF_F_TSO | NETIF_F_TSO6;
  1571. ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1572. NETIF_F_HIGHDMA;
  1573. ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1574. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
  1575. NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
  1576. ndev->hw_features = hw_features;
  1577. /* Set network device HW mac */
  1578. ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
  1579. }
  1580. /* This function converts from 32b param to two params of level and module
  1581. * Input 32b decoding:
  1582. * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
  1583. * 'happy' flow, e.g. memory allocation failed.
  1584. * b30 - enable all INFO prints. INFO prints are for major steps in the flow
  1585. * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking a specific flow at a low level.
 *
 * Note that the level should be that of the lowest required logs.
  1590. */
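/* Example, per the decoding implemented below (assuming QED_LOG_NOTICE_MASK,
 * QED_LOG_INFO_MASK and QED_LOG_VERBOSE_MASK follow the bit layout described
 * above, i.e. bit 31 / bit 30 / bits 29-0):
 *   debug=0x80000000 -> dp_level=NOTICE,  dp_module=0
 *   debug=0x40000000 -> dp_level=INFO,    dp_module=0
 *   debug=0x00000003 -> dp_level=VERBOSE, dp_module=0x3 (modules 0 and 1)
 */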
  1591. void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
  1592. {
  1593. *p_dp_level = QED_LEVEL_NOTICE;
  1594. *p_dp_module = 0;
  1595. if (debug & QED_LOG_VERBOSE_MASK) {
  1596. *p_dp_level = QED_LEVEL_VERBOSE;
  1597. *p_dp_module = (debug & 0x3FFFFFFF);
  1598. } else if (debug & QED_LOG_INFO_MASK) {
  1599. *p_dp_level = QED_LEVEL_INFO;
  1600. } else if (debug & QED_LOG_NOTICE_MASK) {
  1601. *p_dp_level = QED_LEVEL_NOTICE;
  1602. }
  1603. }
  1604. static void qede_free_fp_array(struct qede_dev *edev)
  1605. {
  1606. if (edev->fp_array) {
  1607. struct qede_fastpath *fp;
  1608. int i;
  1609. for_each_rss(i) {
  1610. fp = &edev->fp_array[i];
  1611. kfree(fp->sb_info);
  1612. kfree(fp->rxq);
  1613. kfree(fp->txqs);
  1614. }
  1615. kfree(edev->fp_array);
  1616. }
  1617. edev->num_rss = 0;
  1618. }
  1619. static int qede_alloc_fp_array(struct qede_dev *edev)
  1620. {
  1621. struct qede_fastpath *fp;
  1622. int i;
  1623. edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
  1624. sizeof(*edev->fp_array), GFP_KERNEL);
  1625. if (!edev->fp_array) {
  1626. DP_NOTICE(edev, "fp array allocation failed\n");
  1627. goto err;
  1628. }
  1629. for_each_rss(i) {
  1630. fp = &edev->fp_array[i];
  1631. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  1632. if (!fp->sb_info) {
  1633. DP_NOTICE(edev, "sb info struct allocation failed\n");
  1634. goto err;
  1635. }
  1636. fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
  1637. if (!fp->rxq) {
  1638. DP_NOTICE(edev, "RXQ struct allocation failed\n");
  1639. goto err;
  1640. }
  1641. fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
  1642. if (!fp->txqs) {
  1643. DP_NOTICE(edev, "TXQ array allocation failed\n");
  1644. goto err;
  1645. }
  1646. }
  1647. return 0;
  1648. err:
  1649. qede_free_fp_array(edev);
  1650. return -ENOMEM;
  1651. }
  1652. static void qede_sp_task(struct work_struct *work)
  1653. {
  1654. struct qede_dev *edev = container_of(work, struct qede_dev,
  1655. sp_task.work);
  1656. mutex_lock(&edev->qede_lock);
  1657. if (edev->state == QEDE_STATE_OPEN) {
  1658. if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
  1659. qede_config_rx_mode(edev->ndev);
  1660. }
  1661. mutex_unlock(&edev->qede_lock);
  1662. }
  1663. static void qede_update_pf_params(struct qed_dev *cdev)
  1664. {
  1665. struct qed_pf_params pf_params;
  1666. /* 16 rx + 16 tx */
  1667. memset(&pf_params, 0, sizeof(struct qed_pf_params));
  1668. pf_params.eth_pf_params.num_cons = 32;
  1669. qed_ops->common->update_pf_params(cdev, &pf_params);
  1670. }
  1671. enum qede_probe_mode {
  1672. QEDE_PROBE_NORMAL,
  1673. };
  1674. static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
  1675. enum qede_probe_mode mode)
  1676. {
  1677. struct qed_slowpath_params params;
  1678. struct qed_dev_eth_info dev_info;
  1679. struct qede_dev *edev;
  1680. struct qed_dev *cdev;
  1681. int rc;
  1682. if (unlikely(dp_level & QED_LEVEL_INFO))
  1683. pr_notice("Starting qede probe\n");
  1684. cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
  1685. dp_module, dp_level);
  1686. if (!cdev) {
  1687. rc = -ENODEV;
  1688. goto err0;
  1689. }
  1690. qede_update_pf_params(cdev);
  1691. /* Start the Slowpath-process */
  1692. memset(&params, 0, sizeof(struct qed_slowpath_params));
  1693. params.int_mode = QED_INT_MODE_MSIX;
  1694. params.drv_major = QEDE_MAJOR_VERSION;
  1695. params.drv_minor = QEDE_MINOR_VERSION;
  1696. params.drv_rev = QEDE_REVISION_VERSION;
  1697. params.drv_eng = QEDE_ENGINEERING_VERSION;
  1698. strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
  1699. rc = qed_ops->common->slowpath_start(cdev, &params);
  1700. if (rc) {
  1701. pr_notice("Cannot start slowpath\n");
  1702. goto err1;
  1703. }
  1704. /* Learn information crucial for qede to progress */
  1705. rc = qed_ops->fill_dev_info(cdev, &dev_info);
  1706. if (rc)
  1707. goto err2;
  1708. edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
  1709. dp_level);
  1710. if (!edev) {
  1711. rc = -ENOMEM;
  1712. goto err2;
  1713. }
  1714. qede_init_ndev(edev);
  1715. rc = register_netdev(edev->ndev);
  1716. if (rc) {
  1717. DP_NOTICE(edev, "Cannot register net-device\n");
  1718. goto err3;
  1719. }
  1720. edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
  1721. edev->ops->register_ops(cdev, &qede_ll_ops, edev);
  1722. INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
  1723. mutex_init(&edev->qede_lock);
	DP_INFO(edev, "qede probe completed successfully\n");
  1725. return 0;
  1726. err3:
  1727. free_netdev(edev->ndev);
  1728. err2:
  1729. qed_ops->common->slowpath_stop(cdev);
  1730. err1:
  1731. qed_ops->common->remove(cdev);
  1732. err0:
  1733. return rc;
  1734. }
  1735. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  1736. {
  1737. u32 dp_module = 0;
  1738. u8 dp_level = 0;
  1739. qede_config_debug(debug, &dp_module, &dp_level);
  1740. return __qede_probe(pdev, dp_module, dp_level,
  1741. QEDE_PROBE_NORMAL);
  1742. }
  1743. enum qede_remove_mode {
  1744. QEDE_REMOVE_NORMAL,
  1745. };
  1746. static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
  1747. {
  1748. struct net_device *ndev = pci_get_drvdata(pdev);
  1749. struct qede_dev *edev = netdev_priv(ndev);
  1750. struct qed_dev *cdev = edev->cdev;
  1751. DP_INFO(edev, "Starting qede_remove\n");
  1752. cancel_delayed_work_sync(&edev->sp_task);
  1753. unregister_netdev(ndev);
  1754. edev->ops->common->set_power_state(cdev, PCI_D0);
  1755. pci_set_drvdata(pdev, NULL);
  1756. free_netdev(ndev);
  1757. /* Use global ops since we've freed edev */
  1758. qed_ops->common->slowpath_stop(cdev);
  1759. qed_ops->common->remove(cdev);
	pr_notice("qede_remove completed successfully\n");
  1761. }
  1762. static void qede_remove(struct pci_dev *pdev)
  1763. {
  1764. __qede_remove(pdev, QEDE_REMOVE_NORMAL);
  1765. }
  1766. /* -------------------------------------------------------------------------
  1767. * START OF LOAD / UNLOAD
  1768. * -------------------------------------------------------------------------
  1769. */
  1770. static int qede_set_num_queues(struct qede_dev *edev)
  1771. {
  1772. int rc;
  1773. u16 rss_num;
	/* Set up queues according to available resources */
  1775. if (edev->req_rss)
  1776. rss_num = edev->req_rss;
  1777. else
  1778. rss_num = netif_get_num_default_rss_queues() *
  1779. edev->dev_info.common.num_hwfns;
  1780. rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
  1781. rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
  1782. if (rc > 0) {
  1783. /* Managed to request interrupts for our queues */
  1784. edev->num_rss = rc;
  1785. DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
  1786. QEDE_RSS_CNT(edev), rss_num);
  1787. rc = 0;
  1788. }
  1789. return rc;
  1790. }
  1791. static void qede_free_mem_sb(struct qede_dev *edev,
  1792. struct qed_sb_info *sb_info)
  1793. {
  1794. if (sb_info->sb_virt)
  1795. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
  1796. (void *)sb_info->sb_virt, sb_info->sb_phys);
  1797. }
  1798. /* This function allocates fast-path status block memory */
  1799. static int qede_alloc_mem_sb(struct qede_dev *edev,
  1800. struct qed_sb_info *sb_info,
  1801. u16 sb_id)
  1802. {
  1803. struct status_block *sb_virt;
  1804. dma_addr_t sb_phys;
  1805. int rc;
  1806. sb_virt = dma_alloc_coherent(&edev->pdev->dev,
  1807. sizeof(*sb_virt),
  1808. &sb_phys, GFP_KERNEL);
  1809. if (!sb_virt) {
  1810. DP_ERR(edev, "Status block allocation failed\n");
  1811. return -ENOMEM;
  1812. }
  1813. rc = edev->ops->common->sb_init(edev->cdev, sb_info,
  1814. sb_virt, sb_phys, sb_id,
  1815. QED_SB_TYPE_L2_QUEUE);
  1816. if (rc) {
  1817. DP_ERR(edev, "Status block initialization failed\n");
  1818. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
  1819. sb_virt, sb_phys);
  1820. return rc;
  1821. }
  1822. return 0;
  1823. }
  1824. static void qede_free_rx_buffers(struct qede_dev *edev,
  1825. struct qede_rx_queue *rxq)
  1826. {
  1827. u16 i;
  1828. for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
  1829. struct sw_rx_data *rx_buf;
  1830. struct page *data;
  1831. rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
  1832. data = rx_buf->data;
  1833. dma_unmap_page(&edev->pdev->dev,
  1834. rx_buf->mapping,
  1835. PAGE_SIZE, DMA_FROM_DEVICE);
  1836. rx_buf->data = NULL;
  1837. __free_page(data);
  1838. }
  1839. }
  1840. static void qede_free_sge_mem(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
  1842. int i;
  1843. if (edev->gro_disable)
  1844. return;
  1845. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  1846. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  1847. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  1848. if (replace_buf) {
  1849. dma_unmap_page(&edev->pdev->dev,
  1850. dma_unmap_addr(replace_buf, mapping),
  1851. PAGE_SIZE, DMA_FROM_DEVICE);
  1852. __free_page(replace_buf->data);
  1853. }
  1854. }
  1855. }
  1856. static void qede_free_mem_rxq(struct qede_dev *edev,
  1857. struct qede_rx_queue *rxq)
  1858. {
  1859. qede_free_sge_mem(edev, rxq);
  1860. /* Free rx buffers */
  1861. qede_free_rx_buffers(edev, rxq);
  1862. /* Free the parallel SW ring */
  1863. kfree(rxq->sw_rx_ring);
  1864. /* Free the real RQ ring used by FW */
  1865. edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
  1866. edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
  1867. }
  1868. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  1869. struct qede_rx_queue *rxq)
  1870. {
  1871. struct sw_rx_data *sw_rx_data;
  1872. struct eth_rx_bd *rx_bd;
  1873. dma_addr_t mapping;
  1874. struct page *data;
  1875. u16 rx_buf_size;
  1876. rx_buf_size = rxq->rx_buf_size;
  1877. data = alloc_pages(GFP_ATOMIC, 0);
  1878. if (unlikely(!data)) {
  1879. DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
  1880. return -ENOMEM;
  1881. }
	/* Map the entire page, as it may later be split into
	 * multiple RX-buffer-sized segments.
	 */
  1885. mapping = dma_map_page(&edev->pdev->dev, data, 0,
  1886. PAGE_SIZE, DMA_FROM_DEVICE);
  1887. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  1888. __free_page(data);
  1889. DP_NOTICE(edev, "Failed to map Rx buffer\n");
  1890. return -ENOMEM;
  1891. }
  1892. sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  1893. sw_rx_data->page_offset = 0;
  1894. sw_rx_data->data = data;
  1895. sw_rx_data->mapping = mapping;
  1896. /* Advance PROD and get BD pointer */
  1897. rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
  1898. WARN_ON(!rx_bd);
  1899. rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  1900. rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  1901. rxq->sw_rx_prod++;
  1902. return 0;
  1903. }
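/* Note on the Rx buffer scheme used here: a full page is DMA-mapped per
 * sw_rx_data entry, and sw_rx_data->page_offset tracks how much of it has
 * been consumed; the Rx fast path re-posts partially used pages through
 * qede_realloc_rx_buffer() (earlier in this file) instead of freeing them,
 * with rx_buf_seg_size (set in qede_alloc_mem_rxq() below) as the
 * per-buffer granularity.
 */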
  1904. static int qede_alloc_sge_mem(struct qede_dev *edev,
  1905. struct qede_rx_queue *rxq)
  1906. {
  1907. dma_addr_t mapping;
  1908. int i;
  1909. if (edev->gro_disable)
  1910. return 0;
  1911. if (edev->ndev->mtu > PAGE_SIZE) {
  1912. edev->gro_disable = 1;
  1913. return 0;
  1914. }
  1915. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  1916. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  1917. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  1918. replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
  1919. if (unlikely(!replace_buf->data)) {
  1920. DP_NOTICE(edev,
  1921. "Failed to allocate TPA skb pool [replacement buffer]\n");
  1922. goto err;
  1923. }
  1924. mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
  1925. rxq->rx_buf_size, DMA_FROM_DEVICE);
  1926. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  1927. DP_NOTICE(edev,
  1928. "Failed to map TPA replacement buffer\n");
  1929. goto err;
  1930. }
  1931. dma_unmap_addr_set(replace_buf, mapping, mapping);
  1932. tpa_info->replace_buf.page_offset = 0;
  1933. tpa_info->replace_buf_mapping = mapping;
  1934. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  1935. }
  1936. return 0;
  1937. err:
  1938. qede_free_sge_mem(edev, rxq);
  1939. edev->gro_disable = 1;
  1940. return -ENOMEM;
  1941. }
  1942. /* This function allocates all memory needed per Rx queue */
  1943. static int qede_alloc_mem_rxq(struct qede_dev *edev,
  1944. struct qede_rx_queue *rxq)
  1945. {
  1946. int i, rc, size, num_allocated;
  1947. rxq->num_rx_buffers = edev->q_num_rx_buffers;
  1948. rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
  1949. edev->ndev->mtu;
  1950. if (rxq->rx_buf_size > PAGE_SIZE)
  1951. rxq->rx_buf_size = PAGE_SIZE;
	/* Segment size to split a page into multiple equal parts */
  1953. rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
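	/* For example, with a standard 1500-byte MTU the buffer size
	 * (NET_IP_ALIGN + ETH_OVERHEAD + 1500) rounds up to a 2048-byte
	 * segment, i.e. two Rx buffers per 4 KiB page (assuming
	 * ETH_OVERHEAD stays well below ~500 bytes).
	 */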
  1954. /* Allocate the parallel driver ring for Rx buffers */
  1955. size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
  1956. rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
  1957. if (!rxq->sw_rx_ring) {
  1958. DP_ERR(edev, "Rx buffers ring allocation failed\n");
  1959. goto err;
  1960. }
  1961. /* Allocate FW Rx ring */
  1962. rc = edev->ops->common->chain_alloc(edev->cdev,
  1963. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1964. QED_CHAIN_MODE_NEXT_PTR,
  1965. RX_RING_SIZE,
  1966. sizeof(struct eth_rx_bd),
  1967. &rxq->rx_bd_ring);
  1968. if (rc)
  1969. goto err;
  1970. /* Allocate FW completion ring */
  1971. rc = edev->ops->common->chain_alloc(edev->cdev,
  1972. QED_CHAIN_USE_TO_CONSUME,
  1973. QED_CHAIN_MODE_PBL,
  1974. RX_RING_SIZE,
  1975. sizeof(union eth_rx_cqe),
  1976. &rxq->rx_comp_ring);
  1977. if (rc)
  1978. goto err;
  1979. /* Allocate buffers for the Rx ring */
  1980. for (i = 0; i < rxq->num_rx_buffers; i++) {
  1981. rc = qede_alloc_rx_buffer(edev, rxq);
  1982. if (rc)
  1983. break;
  1984. }
  1985. num_allocated = i;
  1986. if (!num_allocated) {
  1987. DP_ERR(edev, "Rx buffers allocation failed\n");
  1988. goto err;
  1989. } else if (num_allocated < rxq->num_rx_buffers) {
  1990. DP_NOTICE(edev,
  1991. "Allocated less buffers than desired (%d allocated)\n",
  1992. num_allocated);
  1993. }
  1994. qede_alloc_sge_mem(edev, rxq);
  1995. return 0;
  1996. err:
  1997. qede_free_mem_rxq(edev, rxq);
  1998. return -ENOMEM;
  1999. }
  2000. static void qede_free_mem_txq(struct qede_dev *edev,
  2001. struct qede_tx_queue *txq)
  2002. {
  2003. /* Free the parallel SW ring */
  2004. kfree(txq->sw_tx_ring);
	/* Free the real Tx ring (PBL) used by FW */
  2006. edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
  2007. }
  2008. /* This function allocates all memory needed per Tx queue */
  2009. static int qede_alloc_mem_txq(struct qede_dev *edev,
  2010. struct qede_tx_queue *txq)
  2011. {
  2012. int size, rc;
  2013. union eth_tx_bd_types *p_virt;
  2014. txq->num_tx_buffers = edev->q_num_tx_buffers;
  2015. /* Allocate the parallel driver ring for Tx buffers */
  2016. size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
  2017. txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
  2018. if (!txq->sw_tx_ring) {
  2019. DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
  2020. goto err;
  2021. }
  2022. rc = edev->ops->common->chain_alloc(edev->cdev,
  2023. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  2024. QED_CHAIN_MODE_PBL,
  2025. NUM_TX_BDS_MAX,
  2026. sizeof(*p_virt),
  2027. &txq->tx_pbl);
  2028. if (rc)
  2029. goto err;
  2030. return 0;
  2031. err:
  2032. qede_free_mem_txq(edev, txq);
  2033. return -ENOMEM;
  2034. }
  2035. /* This function frees all memory of a single fp */
  2036. static void qede_free_mem_fp(struct qede_dev *edev,
  2037. struct qede_fastpath *fp)
  2038. {
  2039. int tc;
  2040. qede_free_mem_sb(edev, fp->sb_info);
  2041. qede_free_mem_rxq(edev, fp->rxq);
  2042. for (tc = 0; tc < edev->num_tc; tc++)
  2043. qede_free_mem_txq(edev, &fp->txqs[tc]);
  2044. }
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one rx queue and multiple per-TC tx queues).
 */
  2048. static int qede_alloc_mem_fp(struct qede_dev *edev,
  2049. struct qede_fastpath *fp)
  2050. {
  2051. int rc, tc;
  2052. rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
  2053. if (rc)
  2054. goto err;
  2055. rc = qede_alloc_mem_rxq(edev, fp->rxq);
  2056. if (rc)
  2057. goto err;
  2058. for (tc = 0; tc < edev->num_tc; tc++) {
  2059. rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
  2060. if (rc)
  2061. goto err;
  2062. }
  2063. return 0;
  2064. err:
  2065. qede_free_mem_fp(edev, fp);
  2066. return -ENOMEM;
  2067. }
  2068. static void qede_free_mem_load(struct qede_dev *edev)
  2069. {
  2070. int i;
  2071. for_each_rss(i) {
  2072. struct qede_fastpath *fp = &edev->fp_array[i];
  2073. qede_free_mem_fp(edev, fp);
  2074. }
  2075. }
  2076. /* This function allocates all qede memory at NIC load. */
  2077. static int qede_alloc_mem_load(struct qede_dev *edev)
  2078. {
  2079. int rc = 0, rss_id;
  2080. for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
  2081. struct qede_fastpath *fp = &edev->fp_array[rss_id];
  2082. rc = qede_alloc_mem_fp(edev, fp);
  2083. if (rc)
  2084. break;
  2085. }
  2086. if (rss_id != QEDE_RSS_CNT(edev)) {
  2087. /* Failed allocating memory for all the queues */
  2088. if (!rss_id) {
  2089. DP_ERR(edev,
  2090. "Failed to allocate memory for the leading queue\n");
			return -ENOMEM;
  2092. } else {
  2093. DP_NOTICE(edev,
  2094. "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
  2095. QEDE_RSS_CNT(edev), rss_id);
  2096. }
  2097. edev->num_rss = rss_id;
  2098. }
  2099. return 0;
  2100. }
  2101. /* This function inits fp content and resets the SB, RXQ and TXQ structures */
  2102. static void qede_init_fp(struct qede_dev *edev)
  2103. {
  2104. int rss_id, txq_index, tc;
  2105. struct qede_fastpath *fp;
  2106. for_each_rss(rss_id) {
  2107. fp = &edev->fp_array[rss_id];
  2108. fp->edev = edev;
  2109. fp->rss_id = rss_id;
  2110. memset((void *)&fp->napi, 0, sizeof(fp->napi));
  2111. memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
  2112. memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
  2113. fp->rxq->rxq_id = rss_id;
  2114. memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
  2115. for (tc = 0; tc < edev->num_tc; tc++) {
  2116. txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
  2117. fp->txqs[tc].index = txq_index;
  2118. }
  2119. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  2120. edev->ndev->name, rss_id);
  2121. }
  2122. edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
  2123. }
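/* Note: Tx queues are laid out tc-major, txq_index = tc * QEDE_RSS_CNT + rss_id
 * (see qede_init_fp() above); e.g. with 4 RSS queues and 2 TCs, TC0 owns
 * indices 0-3 and TC1 owns indices 4-7. qede_start_queues() and
 * qede_stop_queues() below use the same mapping.
 */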
  2124. static int qede_set_real_num_queues(struct qede_dev *edev)
  2125. {
  2126. int rc = 0;
  2127. rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
  2128. if (rc) {
  2129. DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
  2130. return rc;
  2131. }
  2132. rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
  2133. if (rc) {
  2134. DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
  2135. return rc;
  2136. }
  2137. return 0;
  2138. }
  2139. static void qede_napi_disable_remove(struct qede_dev *edev)
  2140. {
  2141. int i;
  2142. for_each_rss(i) {
  2143. napi_disable(&edev->fp_array[i].napi);
  2144. netif_napi_del(&edev->fp_array[i].napi);
  2145. }
  2146. }
  2147. static void qede_napi_add_enable(struct qede_dev *edev)
  2148. {
  2149. int i;
  2150. /* Add NAPI objects */
  2151. for_each_rss(i) {
  2152. netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
  2153. qede_poll, NAPI_POLL_WEIGHT);
  2154. napi_enable(&edev->fp_array[i].napi);
  2155. }
  2156. }
  2157. static void qede_sync_free_irqs(struct qede_dev *edev)
  2158. {
  2159. int i;
  2160. for (i = 0; i < edev->int_info.used_cnt; i++) {
  2161. if (edev->int_info.msix_cnt) {
  2162. synchronize_irq(edev->int_info.msix[i].vector);
  2163. free_irq(edev->int_info.msix[i].vector,
  2164. &edev->fp_array[i]);
  2165. } else {
  2166. edev->ops->common->simd_handler_clean(edev->cdev, i);
  2167. }
  2168. }
  2169. edev->int_info.used_cnt = 0;
  2170. }
  2171. static int qede_req_msix_irqs(struct qede_dev *edev)
  2172. {
  2173. int i, rc;
	/* Sanity check - the number of MSI-X vectors must cover the prepared RSS queues */
  2175. if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
  2176. DP_ERR(edev,
  2177. "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
  2178. QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
  2179. return -EINVAL;
  2180. }
  2181. for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
  2182. rc = request_irq(edev->int_info.msix[i].vector,
  2183. qede_msix_fp_int, 0, edev->fp_array[i].name,
  2184. &edev->fp_array[i]);
  2185. if (rc) {
  2186. DP_ERR(edev, "Request fp %d irq failed\n", i);
  2187. qede_sync_free_irqs(edev);
  2188. return rc;
  2189. }
  2190. DP_VERBOSE(edev, NETIF_MSG_INTR,
  2191. "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
  2192. edev->fp_array[i].name, i,
  2193. &edev->fp_array[i]);
  2194. edev->int_info.used_cnt++;
  2195. }
  2196. return 0;
  2197. }
  2198. static void qede_simd_fp_handler(void *cookie)
  2199. {
  2200. struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
  2201. napi_schedule_irqoff(&fp->napi);
  2202. }
  2203. static int qede_setup_irqs(struct qede_dev *edev)
  2204. {
  2205. int i, rc = 0;
  2206. /* Learn Interrupt configuration */
  2207. rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
  2208. if (rc)
  2209. return rc;
  2210. if (edev->int_info.msix_cnt) {
  2211. rc = qede_req_msix_irqs(edev);
  2212. if (rc)
  2213. return rc;
  2214. edev->ndev->irq = edev->int_info.msix[0].vector;
  2215. } else {
  2216. const struct qed_common_ops *ops;
		/* qed should learn the RSS ids and callbacks */
  2218. ops = edev->ops->common;
  2219. for (i = 0; i < QEDE_RSS_CNT(edev); i++)
  2220. ops->simd_handler_config(edev->cdev,
  2221. &edev->fp_array[i], i,
  2222. qede_simd_fp_handler);
  2223. edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
  2224. }
  2225. return 0;
  2226. }
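/* Wait for a Tx queue to drain (producer == consumer). The polling loop below
 * gives the queue roughly 1-2 seconds (1000 iterations of
 * usleep_range(1000, 2000)); if it is still stuck and allow_drain is set,
 * the MCP is asked to drain once and the wait is retried.
 */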
  2227. static int qede_drain_txq(struct qede_dev *edev,
  2228. struct qede_tx_queue *txq,
  2229. bool allow_drain)
  2230. {
  2231. int rc, cnt = 1000;
  2232. while (txq->sw_tx_cons != txq->sw_tx_prod) {
  2233. if (!cnt) {
  2234. if (allow_drain) {
  2235. DP_NOTICE(edev,
  2236. "Tx queue[%d] is stuck, requesting MCP to drain\n",
  2237. txq->index);
  2238. rc = edev->ops->common->drain(edev->cdev);
  2239. if (rc)
  2240. return rc;
  2241. return qede_drain_txq(edev, txq, false);
  2242. }
  2243. DP_NOTICE(edev,
  2244. "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
  2245. txq->index, txq->sw_tx_prod,
  2246. txq->sw_tx_cons);
  2247. return -ENODEV;
  2248. }
  2249. cnt--;
  2250. usleep_range(1000, 2000);
  2251. barrier();
  2252. }
  2253. /* FW finished processing, wait for HW to transmit all tx packets */
  2254. usleep_range(1000, 2000);
  2255. return 0;
  2256. }
  2257. static int qede_stop_queues(struct qede_dev *edev)
  2258. {
  2259. struct qed_update_vport_params vport_update_params;
  2260. struct qed_dev *cdev = edev->cdev;
  2261. int rc, tc, i;
  2262. /* Disable the vport */
  2263. memset(&vport_update_params, 0, sizeof(vport_update_params));
  2264. vport_update_params.vport_id = 0;
  2265. vport_update_params.update_vport_active_flg = 1;
  2266. vport_update_params.vport_active_flg = 0;
  2267. vport_update_params.update_rss_flg = 0;
  2268. rc = edev->ops->vport_update(cdev, &vport_update_params);
  2269. if (rc) {
  2270. DP_ERR(edev, "Failed to update vport\n");
  2271. return rc;
  2272. }
  2273. /* Flush Tx queues. If needed, request drain from MCP */
  2274. for_each_rss(i) {
  2275. struct qede_fastpath *fp = &edev->fp_array[i];
  2276. for (tc = 0; tc < edev->num_tc; tc++) {
  2277. struct qede_tx_queue *txq = &fp->txqs[tc];
  2278. rc = qede_drain_txq(edev, txq, true);
  2279. if (rc)
  2280. return rc;
  2281. }
  2282. }
	/* Stop all queues in reverse order */
  2284. for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
  2285. struct qed_stop_rxq_params rx_params;
  2286. /* Stop the Tx Queue(s)*/
  2287. for (tc = 0; tc < edev->num_tc; tc++) {
  2288. struct qed_stop_txq_params tx_params;
  2289. tx_params.rss_id = i;
  2290. tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
  2291. rc = edev->ops->q_tx_stop(cdev, &tx_params);
  2292. if (rc) {
  2293. DP_ERR(edev, "Failed to stop TXQ #%d\n",
  2294. tx_params.tx_queue_id);
  2295. return rc;
  2296. }
  2297. }
  2298. /* Stop the Rx Queue*/
  2299. memset(&rx_params, 0, sizeof(rx_params));
  2300. rx_params.rss_id = i;
  2301. rx_params.rx_queue_id = i;
  2302. rc = edev->ops->q_rx_stop(cdev, &rx_params);
  2303. if (rc) {
  2304. DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
  2305. return rc;
  2306. }
  2307. }
  2308. /* Stop the vport */
  2309. rc = edev->ops->vport_stop(cdev, 0);
  2310. if (rc)
  2311. DP_ERR(edev, "Failed to stop VPORT\n");
  2312. return rc;
  2313. }
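/* Bring-up order used below: start the vport (MTU, GRO, inner-vlan removal),
 * then per RSS queue start the Rx queue and its per-TC Tx queues (programming
 * each Tx doorbell), and finally send a vport-update that marks the vport
 * active and, when more than one RSS queue exists, programs the RSS
 * indirection table and key.
 */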
  2314. static int qede_start_queues(struct qede_dev *edev)
  2315. {
  2316. int rc, tc, i;
  2317. int vlan_removal_en = 1;
  2318. struct qed_dev *cdev = edev->cdev;
  2319. struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
  2320. struct qed_update_vport_params vport_update_params;
  2321. struct qed_queue_start_common_params q_params;
  2322. struct qed_start_vport_params start = {0};
  2323. if (!edev->num_rss) {
  2324. DP_ERR(edev,
  2325. "Cannot update V-VPORT as active as there are no Rx queues\n");
  2326. return -EINVAL;
  2327. }
  2328. start.gro_enable = !edev->gro_disable;
  2329. start.mtu = edev->ndev->mtu;
  2330. start.vport_id = 0;
  2331. start.drop_ttl0 = true;
  2332. start.remove_inner_vlan = vlan_removal_en;
  2333. rc = edev->ops->vport_start(cdev, &start);
  2334. if (rc) {
  2335. DP_ERR(edev, "Start V-PORT failed %d\n", rc);
  2336. return rc;
  2337. }
  2338. DP_VERBOSE(edev, NETIF_MSG_IFUP,
  2339. "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
  2340. start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
  2341. for_each_rss(i) {
  2342. struct qede_fastpath *fp = &edev->fp_array[i];
  2343. dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
  2344. memset(&q_params, 0, sizeof(q_params));
  2345. q_params.rss_id = i;
  2346. q_params.queue_id = i;
  2347. q_params.vport_id = 0;
  2348. q_params.sb = fp->sb_info->igu_sb_id;
  2349. q_params.sb_idx = RX_PI;
  2350. rc = edev->ops->q_rx_start(cdev, &q_params,
  2351. fp->rxq->rx_buf_size,
  2352. fp->rxq->rx_bd_ring.p_phys_addr,
  2353. phys_table,
  2354. fp->rxq->rx_comp_ring.page_cnt,
  2355. &fp->rxq->hw_rxq_prod_addr);
  2356. if (rc) {
  2357. DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
  2358. return rc;
  2359. }
  2360. fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
  2361. qede_update_rx_prod(edev, fp->rxq);
  2362. for (tc = 0; tc < edev->num_tc; tc++) {
  2363. struct qede_tx_queue *txq = &fp->txqs[tc];
  2364. int txq_index = tc * QEDE_RSS_CNT(edev) + i;
  2365. memset(&q_params, 0, sizeof(q_params));
  2366. q_params.rss_id = i;
  2367. q_params.queue_id = txq_index;
  2368. q_params.vport_id = 0;
  2369. q_params.sb = fp->sb_info->igu_sb_id;
  2370. q_params.sb_idx = TX_PI(tc);
  2371. rc = edev->ops->q_tx_start(cdev, &q_params,
  2372. txq->tx_pbl.pbl.p_phys_table,
  2373. txq->tx_pbl.page_cnt,
  2374. &txq->doorbell_addr);
  2375. if (rc) {
  2376. DP_ERR(edev, "Start TXQ #%d failed %d\n",
  2377. txq_index, rc);
  2378. return rc;
  2379. }
  2380. txq->hw_cons_ptr =
  2381. &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
  2382. SET_FIELD(txq->tx_db.data.params,
  2383. ETH_DB_DATA_DEST, DB_DEST_XCM);
  2384. SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
  2385. DB_AGG_CMD_SET);
  2386. SET_FIELD(txq->tx_db.data.params,
  2387. ETH_DB_DATA_AGG_VAL_SEL,
  2388. DQ_XCM_ETH_TX_BD_PROD_CMD);
  2389. txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
  2390. }
  2391. }
  2392. /* Prepare and send the vport enable */
  2393. memset(&vport_update_params, 0, sizeof(vport_update_params));
  2394. vport_update_params.vport_id = start.vport_id;
  2395. vport_update_params.update_vport_active_flg = 1;
  2396. vport_update_params.vport_active_flg = 1;
  2397. /* Fill struct with RSS params */
  2398. if (QEDE_RSS_CNT(edev) > 1) {
  2399. vport_update_params.update_rss_flg = 1;
  2400. for (i = 0; i < 128; i++)
  2401. rss_params->rss_ind_table[i] =
  2402. ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
  2403. netdev_rss_key_fill(rss_params->rss_key,
  2404. sizeof(rss_params->rss_key));
  2405. } else {
  2406. memset(rss_params, 0, sizeof(*rss_params));
  2407. }
  2408. memcpy(&vport_update_params.rss_params, rss_params,
  2409. sizeof(*rss_params));
  2410. rc = edev->ops->vport_update(cdev, &vport_update_params);
  2411. if (rc) {
  2412. DP_ERR(edev, "Update V-PORT failed %d\n", rc);
  2413. return rc;
  2414. }
  2415. return 0;
  2416. }
  2417. static int qede_set_mcast_rx_mac(struct qede_dev *edev,
  2418. enum qed_filter_xcast_params_type opcode,
  2419. unsigned char *mac, int num_macs)
  2420. {
  2421. struct qed_filter_params filter_cmd;
  2422. int i;
  2423. memset(&filter_cmd, 0, sizeof(filter_cmd));
  2424. filter_cmd.type = QED_FILTER_TYPE_MCAST;
  2425. filter_cmd.filter.mcast.type = opcode;
  2426. filter_cmd.filter.mcast.num = num_macs;
  2427. for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
  2428. ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
  2429. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  2430. }
  2431. enum qede_unload_mode {
  2432. QEDE_UNLOAD_NORMAL,
  2433. };
  2434. static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
  2435. {
  2436. struct qed_link_params link_params;
  2437. int rc;
  2438. DP_INFO(edev, "Starting qede unload\n");
  2439. mutex_lock(&edev->qede_lock);
  2440. edev->state = QEDE_STATE_CLOSED;
  2441. /* Close OS Tx */
  2442. netif_tx_disable(edev->ndev);
  2443. netif_carrier_off(edev->ndev);
  2444. /* Reset the link */
  2445. memset(&link_params, 0, sizeof(link_params));
  2446. link_params.link_up = false;
  2447. edev->ops->common->set_link(edev->cdev, &link_params);
  2448. rc = qede_stop_queues(edev);
  2449. if (rc) {
  2450. qede_sync_free_irqs(edev);
  2451. goto out;
  2452. }
  2453. DP_INFO(edev, "Stopped Queues\n");
  2454. qede_vlan_mark_nonconfigured(edev);
  2455. edev->ops->fastpath_stop(edev->cdev);
  2456. /* Release the interrupts */
  2457. qede_sync_free_irqs(edev);
  2458. edev->ops->common->set_fp_int(edev->cdev, 0);
  2459. qede_napi_disable_remove(edev);
  2460. qede_free_mem_load(edev);
  2461. qede_free_fp_array(edev);
  2462. out:
  2463. mutex_unlock(&edev->qede_lock);
  2464. DP_INFO(edev, "Ending qede unload\n");
  2465. }
  2466. enum qede_load_mode {
  2467. QEDE_LOAD_NORMAL,
  2468. };
  2469. static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
  2470. {
  2471. struct qed_link_params link_params;
  2472. struct qed_link_output link_output;
  2473. int rc;
  2474. DP_INFO(edev, "Starting qede load\n");
  2475. rc = qede_set_num_queues(edev);
  2476. if (rc)
  2477. goto err0;
  2478. rc = qede_alloc_fp_array(edev);
  2479. if (rc)
  2480. goto err0;
  2481. qede_init_fp(edev);
  2482. rc = qede_alloc_mem_load(edev);
  2483. if (rc)
  2484. goto err1;
  2485. DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
  2486. QEDE_RSS_CNT(edev), edev->num_tc);
  2487. rc = qede_set_real_num_queues(edev);
  2488. if (rc)
  2489. goto err2;
  2490. qede_napi_add_enable(edev);
  2491. DP_INFO(edev, "Napi added and enabled\n");
  2492. rc = qede_setup_irqs(edev);
  2493. if (rc)
  2494. goto err3;
  2495. DP_INFO(edev, "Setup IRQs succeeded\n");
  2496. rc = qede_start_queues(edev);
  2497. if (rc)
  2498. goto err4;
  2499. DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
  2500. /* Add primary mac and set Rx filters */
  2501. ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
  2502. mutex_lock(&edev->qede_lock);
  2503. edev->state = QEDE_STATE_OPEN;
  2504. mutex_unlock(&edev->qede_lock);
  2505. /* Program un-configured VLANs */
  2506. qede_configure_vlan_filters(edev);
  2507. /* Ask for link-up using current configuration */
  2508. memset(&link_params, 0, sizeof(link_params));
  2509. link_params.link_up = true;
  2510. edev->ops->common->set_link(edev->cdev, &link_params);
  2511. /* Query whether link is already-up */
  2512. memset(&link_output, 0, sizeof(link_output));
  2513. edev->ops->common->get_link(edev->cdev, &link_output);
  2514. qede_link_update(edev, &link_output);
	DP_INFO(edev, "qede load completed successfully\n");
  2516. return 0;
  2517. err4:
  2518. qede_sync_free_irqs(edev);
  2519. memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
  2520. err3:
  2521. qede_napi_disable_remove(edev);
  2522. err2:
  2523. qede_free_mem_load(edev);
  2524. err1:
  2525. edev->ops->common->set_fp_int(edev->cdev, 0);
  2526. qede_free_fp_array(edev);
  2527. edev->num_rss = 0;
  2528. err0:
  2529. return rc;
  2530. }
  2531. void qede_reload(struct qede_dev *edev,
  2532. void (*func)(struct qede_dev *, union qede_reload_args *),
  2533. union qede_reload_args *args)
  2534. {
  2535. qede_unload(edev, QEDE_UNLOAD_NORMAL);
  2536. /* Call function handler to update parameters
  2537. * needed for function load.
  2538. */
  2539. if (func)
  2540. func(edev, args);
  2541. qede_load(edev, QEDE_LOAD_NORMAL);
  2542. mutex_lock(&edev->qede_lock);
  2543. qede_config_rx_mode(edev->ndev);
  2544. mutex_unlock(&edev->qede_lock);
  2545. }
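/* A minimal usage sketch for qede_reload(): a configuration change that can
 * only take effect across an unload/load cycle passes a handler that updates
 * the relevant edev field between the two. The member name below ("mtu") is
 * illustrative only - the real members of union qede_reload_args live in
 * qede.h and are not shown in this file:
 *
 *	static void qede_update_mtu(struct qede_dev *edev,
 *				    union qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->mtu;
 *	}
 *
 *	// e.g. from an .ndo_change_mtu handler such as qede_change_mtu():
 *	// args.mtu = new_mtu; qede_reload(edev, qede_update_mtu, &args);
 */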
  2546. /* called with rtnl_lock */
  2547. static int qede_open(struct net_device *ndev)
  2548. {
  2549. struct qede_dev *edev = netdev_priv(ndev);
  2550. netif_carrier_off(ndev);
  2551. edev->ops->common->set_power_state(edev->cdev, PCI_D0);
  2552. return qede_load(edev, QEDE_LOAD_NORMAL);
  2553. }
  2554. static int qede_close(struct net_device *ndev)
  2555. {
  2556. struct qede_dev *edev = netdev_priv(ndev);
  2557. qede_unload(edev, QEDE_UNLOAD_NORMAL);
  2558. return 0;
  2559. }
  2560. static void qede_link_update(void *dev, struct qed_link_output *link)
  2561. {
  2562. struct qede_dev *edev = dev;
  2563. if (!netif_running(edev->ndev)) {
  2564. DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
  2565. return;
  2566. }
  2567. if (link->link_up) {
  2568. if (!netif_carrier_ok(edev->ndev)) {
  2569. DP_NOTICE(edev, "Link is up\n");
  2570. netif_tx_start_all_queues(edev->ndev);
  2571. netif_carrier_on(edev->ndev);
  2572. }
  2573. } else {
  2574. if (netif_carrier_ok(edev->ndev)) {
  2575. DP_NOTICE(edev, "Link is down\n");
  2576. netif_tx_disable(edev->ndev);
  2577. netif_carrier_off(edev->ndev);
  2578. }
  2579. }
  2580. }
  2581. static int qede_set_mac_addr(struct net_device *ndev, void *p)
  2582. {
  2583. struct qede_dev *edev = netdev_priv(ndev);
  2584. struct sockaddr *addr = p;
  2585. int rc;
  2586. ASSERT_RTNL(); /* @@@TBD To be removed */
  2587. DP_INFO(edev, "Set_mac_addr called\n");
  2588. if (!is_valid_ether_addr(addr->sa_data)) {
  2589. DP_NOTICE(edev, "The MAC address is not valid\n");
  2590. return -EFAULT;
  2591. }
  2592. ether_addr_copy(ndev->dev_addr, addr->sa_data);
  2593. if (!netif_running(ndev)) {
  2594. DP_NOTICE(edev, "The device is currently down\n");
  2595. return 0;
  2596. }
  2597. /* Remove the previous primary mac */
  2598. rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
  2599. edev->primary_mac);
  2600. if (rc)
  2601. return rc;
  2602. /* Add MAC filter according to the new unicast HW MAC address */
  2603. ether_addr_copy(edev->primary_mac, ndev->dev_addr);
  2604. return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
  2605. edev->primary_mac);
  2606. }
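/* Multicast filtering below works with a fixed budget of 64 multicast MAC
 * filters (hence the 64 * ETH_ALEN buffer and the mc_count checks); anything
 * beyond that, or IFF_ALLMULTI, falls back to multicast-promiscuous mode.
 */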
  2607. static int
  2608. qede_configure_mcast_filtering(struct net_device *ndev,
  2609. enum qed_filter_rx_mode_type *accept_flags)
  2610. {
  2611. struct qede_dev *edev = netdev_priv(ndev);
  2612. unsigned char *mc_macs, *temp;
  2613. struct netdev_hw_addr *ha;
  2614. int rc = 0, mc_count;
  2615. size_t size;
  2616. size = 64 * ETH_ALEN;
  2617. mc_macs = kzalloc(size, GFP_KERNEL);
  2618. if (!mc_macs) {
  2619. DP_NOTICE(edev,
  2620. "Failed to allocate memory for multicast MACs\n");
  2621. rc = -ENOMEM;
  2622. goto exit;
  2623. }
  2624. temp = mc_macs;
  2625. /* Remove all previously configured MAC filters */
  2626. rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
  2627. mc_macs, 1);
  2628. if (rc)
  2629. goto exit;
  2630. netif_addr_lock_bh(ndev);
  2631. mc_count = netdev_mc_count(ndev);
  2632. if (mc_count < 64) {
  2633. netdev_for_each_mc_addr(ha, ndev) {
  2634. ether_addr_copy(temp, ha->addr);
  2635. temp += ETH_ALEN;
  2636. }
  2637. }
  2638. netif_addr_unlock_bh(ndev);
  2639. /* Check for all multicast @@@TBD resource allocation */
  2640. if ((ndev->flags & IFF_ALLMULTI) ||
  2641. (mc_count > 64)) {
  2642. if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
  2643. *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
  2644. } else {
  2645. /* Add all multicast MAC filters */
  2646. rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
  2647. mc_macs, mc_count);
  2648. }
  2649. exit:
  2650. kfree(mc_macs);
  2651. return rc;
  2652. }
  2653. static void qede_set_rx_mode(struct net_device *ndev)
  2654. {
  2655. struct qede_dev *edev = netdev_priv(ndev);
  2656. DP_INFO(edev, "qede_set_rx_mode called\n");
  2657. if (edev->state != QEDE_STATE_OPEN) {
  2658. DP_INFO(edev,
  2659. "qede_set_rx_mode called while interface is down\n");
  2660. } else {
  2661. set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
  2662. schedule_delayed_work(&edev->sp_task, 0);
  2663. }
  2664. }
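/* The actual filter programming is deferred to qede_sp_task() (see above):
 * .ndo_set_rx_mode is invoked with the netdev address lock held, i.e. in
 * atomic context, while qede_config_rx_mode() below allocates memory and
 * issues filter commands under qede_lock, so it runs from the slow-path
 * workqueue instead.
 */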
  2665. /* Must be called with qede_lock held */
  2666. static void qede_config_rx_mode(struct net_device *ndev)
  2667. {
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
  2669. struct qede_dev *edev = netdev_priv(ndev);
  2670. struct qed_filter_params rx_mode;
  2671. unsigned char *uc_macs, *temp;
  2672. struct netdev_hw_addr *ha;
  2673. int rc, uc_count;
  2674. size_t size;
  2675. netif_addr_lock_bh(ndev);
  2676. uc_count = netdev_uc_count(ndev);
  2677. size = uc_count * ETH_ALEN;
  2678. uc_macs = kzalloc(size, GFP_ATOMIC);
  2679. if (!uc_macs) {
  2680. DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
  2681. netif_addr_unlock_bh(ndev);
  2682. return;
  2683. }
  2684. temp = uc_macs;
  2685. netdev_for_each_uc_addr(ha, ndev) {
  2686. ether_addr_copy(temp, ha->addr);
  2687. temp += ETH_ALEN;
  2688. }
  2689. netif_addr_unlock_bh(ndev);
  2690. /* Configure the struct for the Rx mode */
  2691. memset(&rx_mode, 0, sizeof(struct qed_filter_params));
  2692. rx_mode.type = QED_FILTER_TYPE_RX_MODE;
	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
  2696. rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
  2697. edev->primary_mac);
  2698. if (rc)
  2699. goto out;
  2700. /* Check for promiscuous */
  2701. if ((ndev->flags & IFF_PROMISC) ||
  2702. (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
  2703. accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
  2704. } else {
  2705. /* Add MAC filters according to the unicast secondary macs */
  2706. int i;
  2707. temp = uc_macs;
  2708. for (i = 0; i < uc_count; i++) {
  2709. rc = qede_set_ucast_rx_mac(edev,
  2710. QED_FILTER_XCAST_TYPE_ADD,
  2711. temp);
  2712. if (rc)
  2713. goto out;
  2714. temp += ETH_ALEN;
  2715. }
  2716. rc = qede_configure_mcast_filtering(ndev, &accept_flags);
  2717. if (rc)
  2718. goto out;
  2719. }
  2720. /* take care of VLAN mode */
  2721. if (ndev->flags & IFF_PROMISC) {
  2722. qede_config_accept_any_vlan(edev, true);
  2723. } else if (!edev->non_configured_vlans) {
  2724. /* It's possible that accept_any_vlan mode is set due to a
  2725. * previous setting of IFF_PROMISC. If vlan credits are
  2726. * sufficient, disable accept_any_vlan.
  2727. */
  2728. qede_config_accept_any_vlan(edev, false);
  2729. }
  2730. rx_mode.filter.accept_flags = accept_flags;
  2731. edev->ops->filter_config(edev->cdev, &rx_mode);
  2732. out:
  2733. kfree(uc_macs);
  2734. }