/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);
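
/* ena_tx_timeout - netdev watchdog callback
 * @dev: network interface device structure
 *
 * Called by the networking core when a TX queue appears stuck. The handler
 * only records the reset reason and sets ENA_FLAG_TRIGGER_RESET; the reset
 * itself is carried out elsewhere in the driver, which polls this flag.
 */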
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Bail out if a reset has already been triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timeout\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
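
/* ena_init_rx_cpu_rmap - set up the aRFS CPU reverse map (CONFIG_RFS_ACCEL)
 * @adapter: board private structure
 *
 * Allocates an IRQ CPU rmap and registers every I/O vector in it so that
 * accelerated RFS can steer flows to the CPU servicing the queue's
 * interrupt. Compiles down to a no-op returning 0 when CONFIG_RFS_ACCEL
 * is not set.
 */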
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
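
/* ena_init_io_rings - initialize software state for all Tx/Rx rings
 * @adapter: board private structure
 *
 * Fills the common per-ring fields and then the Tx- and Rx-specific ones
 * (ring sizes, SGL sizes, interrupt moderation intervals, copybreak).
 * No device resources are allocated here.
 */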
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:
	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}
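
/* validate_rx_req_id - sanity check a request id returned by the device
 * @rx_ring: RX ring the id belongs to
 * @req_id: request id to validate
 *
 * Returns 0 if the id is within the ring size. Otherwise it bumps the
 * bad_req_id statistic and requests a device reset by setting
 * ENA_FLAG_TRIGGER_RESET, since an out-of-range id means the device and
 * driver are out of sync.
 */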
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_rx_ids = vzalloc_node(size, node);
	if (!rx_ring->free_rx_ids) {
		rx_ring->free_rx_ids = vzalloc(size);
		if (!rx_ring->free_rx_ids) {
			vfree(rx_ring->rx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_rx_ids);
	rx_ring->free_rx_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:
	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
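
/* ena_refill_rx_bufs - post up to @num new RX buffers to the device
 * @rx_ring: RX ring to refill
 * @num: number of buffers to post
 *
 * Pulls free request ids, allocates and DMA-maps a page for each one and
 * hands the descriptors to the device. A memory barrier is issued before
 * ringing the doorbell so the device never sees stale descriptors.
 * Returns the number of buffers actually posted, which may be less than
 * @num if allocation fails.
 */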
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_rx_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	if (likely(i)) {
		/* Add a memory barrier to make sure the descriptors were
		 * written before issuing the doorbell
		 */
		wmb();
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
		mmiowb();
	}

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring whose buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
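
/* validate_tx_req_id - sanity check a completed TX request id
 * @tx_ring: TX ring the completion was received on
 * @req_id: request id reported by the device
 *
 * Returns 0 when the id is in range and still has an skb attached.
 * Otherwise the bad_req_id statistic is incremented and a device reset is
 * requested, since a bogus completion means the device and driver disagree
 * about the ring state.
 */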
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}
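
/* ena_clean_tx_irq - reclaim completed TX descriptors
 * @tx_ring: TX ring to clean
 * @budget: maximum number of completions to process
 *
 * Walks the completion queue, unmaps and frees the skb of each completed
 * request, returns the request ids to the free list and, once enough SQ
 * space is available again, wakes the stopped netdev TX queue.
 * Returns the number of packets cleaned.
 */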
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}
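
/* ena_rx_skb - build an skb for a received packet
 * @rx_ring: RX ring the packet arrived on
 * @ena_bufs: buffer descriptors reported by the device
 * @descs: number of descriptors making up the packet
 * @next_to_clean: ring index, advanced as buffers are consumed
 *
 * Small packets (up to rx_copybreak) are copied into a freshly allocated
 * linear skb so the original page can be reused; larger packets are built
 * by unmapping the pages and attaching them to the skb as frags.
 */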
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: ring on which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_tx_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
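
/* ena_io_poll - NAPI poll handler for a Tx/Rx queue pair
 * @napi: napi instance shared by the paired Tx and Rx rings
 * @budget: NAPI budget for Rx processing
 *
 * Cleans the Tx and Rx rings that share this interrupt vector. When both
 * rings are drained below their budgets the NAPI context is completed; if
 * completion succeeds, interrupt moderation is updated (when adaptive
 * moderation is enabled) and the MSI-X vector is unmasked. Otherwise the
 * full budget is returned so NAPI keeps polling.
 */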
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or is down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
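
/* ena_intr_msix_mgmnt - management MSI-X interrupt handler (admin + AENQ)
 * @irq: interrupt number
 * @data: pointer to the adapter private structure
 *
 * Handles admin queue completions and, once the device is marked running,
 * dispatches asynchronous event notifications (AENQ).
 */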
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->tx_ring->first_interrupt = true;
	ena_napi->rx_ring->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq), plus one
 * vector for each potential io queue. The number of potential io queues is
 * the minimum of what the device supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
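
/* ena_setup_io_intr - populate the IRQ table entries for the I/O vectors
 * @adapter: board private structure
 *
 * Fills in the name, handler, napi data, vector number and a round-robin
 * CPU affinity hint for every Tx/Rx queue pair interrupt.
 */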
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
  1131. static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
  1132. {
  1133. unsigned long flags = 0;
  1134. struct ena_irq *irq;
  1135. int rc;
  1136. irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
  1137. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  1138. irq->data);
  1139. if (rc) {
  1140. netif_err(adapter, probe, adapter->netdev,
  1141. "failed to request admin irq\n");
  1142. return rc;
  1143. }
  1144. netif_dbg(adapter, probe, adapter->netdev,
  1145. "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
  1146. irq->affinity_hint_mask.bits[0], irq->vector);
  1147. irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
  1148. return rc;
  1149. }
  1150. static int ena_request_io_irq(struct ena_adapter *adapter)
  1151. {
  1152. unsigned long flags = 0;
  1153. struct ena_irq *irq;
  1154. int rc = 0, i, k;
  1155. if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
  1156. netif_err(adapter, ifup, adapter->netdev,
  1157. "Failed to request I/O IRQ: MSI-X is not enabled\n");
  1158. return -EINVAL;
  1159. }
  1160. for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
  1161. irq = &adapter->irq_tbl[i];
  1162. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  1163. irq->data);
  1164. if (rc) {
  1165. netif_err(adapter, ifup, adapter->netdev,
  1166. "Failed to request I/O IRQ. index %d rc %d\n",
  1167. i, rc);
  1168. goto err;
  1169. }
  1170. netif_dbg(adapter, ifup, adapter->netdev,
  1171. "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
  1172. i, irq->affinity_hint_mask.bits[0], irq->vector);
  1173. irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
  1174. }
  1175. return rc;
  1176. err:
  1177. for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
  1178. irq = &adapter->irq_tbl[k];
  1179. free_irq(irq->vector, irq->data);
  1180. }
  1181. return rc;
  1182. }
  1183. static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
  1184. {
  1185. struct ena_irq *irq;
  1186. irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
  1187. synchronize_irq(irq->vector);
  1188. irq_set_affinity_hint(irq->vector, NULL);
  1189. free_irq(irq->vector, irq->data);
  1190. }
  1191. static void ena_free_io_irq(struct ena_adapter *adapter)
  1192. {
  1193. struct ena_irq *irq;
  1194. int i;
  1195. #ifdef CONFIG_RFS_ACCEL
  1196. if (adapter->msix_vecs >= 1) {
  1197. free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
  1198. adapter->netdev->rx_cpu_rmap = NULL;
  1199. }
  1200. #endif /* CONFIG_RFS_ACCEL */
  1201. for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
  1202. irq = &adapter->irq_tbl[i];
  1203. irq_set_affinity_hint(irq->vector, NULL);
  1204. free_irq(irq->vector, irq->data);
  1205. }
  1206. }
  1207. static void ena_disable_msix(struct ena_adapter *adapter)
  1208. {
  1209. if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
  1210. pci_free_irq_vectors(adapter->pdev);
  1211. }
  1212. static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
  1213. {
  1214. int i;
  1215. if (!netif_running(adapter->netdev))
  1216. return;
  1217. for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
  1218. synchronize_irq(adapter->irq_tbl[i].vector);
  1219. }
  1220. static void ena_del_napi(struct ena_adapter *adapter)
  1221. {
  1222. int i;
  1223. for (i = 0; i < adapter->num_queues; i++)
  1224. netif_napi_del(&adapter->ena_napi[i].napi);
  1225. }
  1226. static void ena_init_napi(struct ena_adapter *adapter)
  1227. {
  1228. struct ena_napi *napi;
  1229. int i;
  1230. for (i = 0; i < adapter->num_queues; i++) {
  1231. napi = &adapter->ena_napi[i];
  1232. netif_napi_add(adapter->netdev,
  1233. &adapter->ena_napi[i].napi,
  1234. ena_io_poll,
  1235. ENA_NAPI_BUDGET);
  1236. napi->rx_ring = &adapter->rx_ring[i];
  1237. napi->tx_ring = &adapter->tx_ring[i];
  1238. napi->qid = i;
  1239. }
  1240. }
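/* Each queue pair shares a single NAPI context: ena_io_poll() services both
 * the Tx and Rx ring referenced by struct ena_napi, using ENA_NAPI_BUDGET
 * as the poll weight passed to netif_napi_add() above.
 */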
  1241. static void ena_napi_disable_all(struct ena_adapter *adapter)
  1242. {
  1243. int i;
  1244. for (i = 0; i < adapter->num_queues; i++)
  1245. napi_disable(&adapter->ena_napi[i].napi);
  1246. }
  1247. static void ena_napi_enable_all(struct ena_adapter *adapter)
  1248. {
  1249. int i;
  1250. for (i = 0; i < adapter->num_queues; i++)
  1251. napi_enable(&adapter->ena_napi[i].napi);
  1252. }
  1253. static void ena_restore_ethtool_params(struct ena_adapter *adapter)
  1254. {
  1255. adapter->tx_usecs = 0;
  1256. adapter->rx_usecs = 0;
  1257. adapter->tx_frames = 1;
  1258. adapter->rx_frames = 1;
  1259. }
1260. /* Configure Rx forwarding (RSS): indirection table, hash function and hash inputs */
  1261. static int ena_rss_configure(struct ena_adapter *adapter)
  1262. {
  1263. struct ena_com_dev *ena_dev = adapter->ena_dev;
  1264. int rc;
  1265. /* In case the RSS table wasn't initialized by probe */
  1266. if (!ena_dev->rss.tbl_log_size) {
  1267. rc = ena_rss_init_default(adapter);
  1268. if (rc && (rc != -EOPNOTSUPP)) {
  1269. netif_err(adapter, ifup, adapter->netdev,
  1270. "Failed to init RSS rc: %d\n", rc);
  1271. return rc;
  1272. }
  1273. }
  1274. /* Set indirect table */
  1275. rc = ena_com_indirect_table_set(ena_dev);
  1276. if (unlikely(rc && rc != -EOPNOTSUPP))
  1277. return rc;
  1278. /* Configure hash function (if supported) */
  1279. rc = ena_com_set_hash_function(ena_dev);
  1280. if (unlikely(rc && (rc != -EOPNOTSUPP)))
  1281. return rc;
  1282. /* Configure hash inputs (if supported) */
  1283. rc = ena_com_set_hash_ctrl(ena_dev);
  1284. if (unlikely(rc && (rc != -EOPNOTSUPP)))
  1285. return rc;
  1286. return 0;
  1287. }
  1288. static int ena_up_complete(struct ena_adapter *adapter)
  1289. {
  1290. int rc;
  1291. rc = ena_rss_configure(adapter);
  1292. if (rc)
  1293. return rc;
  1294. ena_init_napi(adapter);
  1295. ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
  1296. ena_refill_all_rx_bufs(adapter);
  1297. /* enable transmits */
  1298. netif_tx_start_all_queues(adapter->netdev);
  1299. ena_restore_ethtool_params(adapter);
  1300. ena_napi_enable_all(adapter);
  1301. return 0;
  1302. }
  1303. static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
  1304. {
  1305. struct ena_com_create_io_ctx ctx = { 0 };
  1306. struct ena_com_dev *ena_dev;
  1307. struct ena_ring *tx_ring;
  1308. u32 msix_vector;
  1309. u16 ena_qid;
  1310. int rc;
  1311. ena_dev = adapter->ena_dev;
  1312. tx_ring = &adapter->tx_ring[qid];
  1313. msix_vector = ENA_IO_IRQ_IDX(qid);
  1314. ena_qid = ENA_IO_TXQ_IDX(qid);
  1315. ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
  1316. ctx.qid = ena_qid;
  1317. ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
  1318. ctx.msix_vector = msix_vector;
  1319. ctx.queue_size = adapter->tx_ring_size;
  1320. ctx.numa_node = cpu_to_node(tx_ring->cpu);
  1321. rc = ena_com_create_io_queue(ena_dev, &ctx);
  1322. if (rc) {
  1323. netif_err(adapter, ifup, adapter->netdev,
  1324. "Failed to create I/O TX queue num %d rc: %d\n",
  1325. qid, rc);
  1326. return rc;
  1327. }
  1328. rc = ena_com_get_io_handlers(ena_dev, ena_qid,
  1329. &tx_ring->ena_com_io_sq,
  1330. &tx_ring->ena_com_io_cq);
  1331. if (rc) {
  1332. netif_err(adapter, ifup, adapter->netdev,
  1333. "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
  1334. qid, rc);
  1335. ena_com_destroy_io_queue(ena_dev, ena_qid);
  1336. return rc;
  1337. }
  1338. ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
  1339. return rc;
  1340. }
  1341. static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
  1342. {
  1343. struct ena_com_dev *ena_dev = adapter->ena_dev;
  1344. int rc, i;
  1345. for (i = 0; i < adapter->num_queues; i++) {
  1346. rc = ena_create_io_tx_queue(adapter, i);
  1347. if (rc)
  1348. goto create_err;
  1349. }
  1350. return 0;
  1351. create_err:
  1352. while (i--)
  1353. ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
  1354. return rc;
  1355. }
  1356. static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
  1357. {
  1358. struct ena_com_dev *ena_dev;
  1359. struct ena_com_create_io_ctx ctx = { 0 };
  1360. struct ena_ring *rx_ring;
  1361. u32 msix_vector;
  1362. u16 ena_qid;
  1363. int rc;
  1364. ena_dev = adapter->ena_dev;
  1365. rx_ring = &adapter->rx_ring[qid];
  1366. msix_vector = ENA_IO_IRQ_IDX(qid);
  1367. ena_qid = ENA_IO_RXQ_IDX(qid);
  1368. ctx.qid = ena_qid;
  1369. ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
  1370. ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
  1371. ctx.msix_vector = msix_vector;
  1372. ctx.queue_size = adapter->rx_ring_size;
  1373. ctx.numa_node = cpu_to_node(rx_ring->cpu);
  1374. rc = ena_com_create_io_queue(ena_dev, &ctx);
  1375. if (rc) {
  1376. netif_err(adapter, ifup, adapter->netdev,
  1377. "Failed to create I/O RX queue num %d rc: %d\n",
  1378. qid, rc);
  1379. return rc;
  1380. }
  1381. rc = ena_com_get_io_handlers(ena_dev, ena_qid,
  1382. &rx_ring->ena_com_io_sq,
  1383. &rx_ring->ena_com_io_cq);
  1384. if (rc) {
  1385. netif_err(adapter, ifup, adapter->netdev,
  1386. "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
  1387. qid, rc);
  1388. ena_com_destroy_io_queue(ena_dev, ena_qid);
  1389. return rc;
  1390. }
  1391. ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
  1392. return rc;
  1393. }
  1394. static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
  1395. {
  1396. struct ena_com_dev *ena_dev = adapter->ena_dev;
  1397. int rc, i;
  1398. for (i = 0; i < adapter->num_queues; i++) {
  1399. rc = ena_create_io_rx_queue(adapter, i);
  1400. if (rc)
  1401. goto create_err;
  1402. }
  1403. return 0;
  1404. create_err:
  1405. while (i--)
  1406. ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
  1407. return rc;
  1408. }
  1409. static int ena_up(struct ena_adapter *adapter)
  1410. {
  1411. int rc, i;
  1412. netdev_dbg(adapter->netdev, "%s\n", __func__);
  1413. ena_setup_io_intr(adapter);
  1414. rc = ena_request_io_irq(adapter);
  1415. if (rc)
  1416. goto err_req_irq;
  1417. /* allocate transmit descriptors */
  1418. rc = ena_setup_all_tx_resources(adapter);
  1419. if (rc)
  1420. goto err_setup_tx;
  1421. /* allocate receive descriptors */
  1422. rc = ena_setup_all_rx_resources(adapter);
  1423. if (rc)
  1424. goto err_setup_rx;
  1425. /* Create TX queues */
  1426. rc = ena_create_all_io_tx_queues(adapter);
  1427. if (rc)
  1428. goto err_create_tx_queues;
  1429. /* Create RX queues */
  1430. rc = ena_create_all_io_rx_queues(adapter);
  1431. if (rc)
  1432. goto err_create_rx_queues;
  1433. rc = ena_up_complete(adapter);
  1434. if (rc)
  1435. goto err_up;
  1436. if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
  1437. netif_carrier_on(adapter->netdev);
  1438. u64_stats_update_begin(&adapter->syncp);
  1439. adapter->dev_stats.interface_up++;
  1440. u64_stats_update_end(&adapter->syncp);
  1441. set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1442. /* Enable the completion queues' interrupts */
  1443. for (i = 0; i < adapter->num_queues; i++)
  1444. ena_unmask_interrupt(&adapter->tx_ring[i],
  1445. &adapter->rx_ring[i]);
1446. /* schedule napi in case we had pending packets
1447. * from the last time napi was disabled
1448. */
  1449. for (i = 0; i < adapter->num_queues; i++)
  1450. napi_schedule(&adapter->ena_napi[i].napi);
  1451. return rc;
  1452. err_up:
  1453. ena_destroy_all_rx_queues(adapter);
  1454. err_create_rx_queues:
  1455. ena_destroy_all_tx_queues(adapter);
  1456. err_create_tx_queues:
  1457. ena_free_all_io_rx_resources(adapter);
  1458. err_setup_rx:
  1459. ena_free_all_io_tx_resources(adapter);
  1460. err_setup_tx:
  1461. ena_free_io_irq(adapter);
  1462. err_req_irq:
  1463. return rc;
  1464. }
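/* The error labels above unwind in the exact reverse order of the setup
 * steps (I/O IRQs, Tx resources, Rx resources, Tx queues, Rx queues,
 * up-complete), so a failure at any stage releases only what the earlier
 * steps already acquired.
 */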
  1465. static void ena_down(struct ena_adapter *adapter)
  1466. {
  1467. netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
  1468. clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
  1469. u64_stats_update_begin(&adapter->syncp);
  1470. adapter->dev_stats.interface_down++;
  1471. u64_stats_update_end(&adapter->syncp);
  1472. netif_carrier_off(adapter->netdev);
  1473. netif_tx_disable(adapter->netdev);
  1474. /* After this point the napi handler won't enable the tx queue */
  1475. ena_napi_disable_all(adapter);
1476. /* After destroying the queues there won't be any new interrupts */
  1477. if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
  1478. int rc;
  1479. rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
  1480. if (rc)
  1481. dev_err(&adapter->pdev->dev, "Device reset failed\n");
  1482. }
  1483. ena_destroy_all_io_queues(adapter);
  1484. ena_disable_io_intr_sync(adapter);
  1485. ena_free_io_irq(adapter);
  1486. ena_del_napi(adapter);
  1487. ena_free_all_tx_bufs(adapter);
  1488. ena_free_all_rx_bufs(adapter);
  1489. ena_free_all_io_tx_resources(adapter);
  1490. ena_free_all_io_rx_resources(adapter);
  1491. }
  1492. /* ena_open - Called when a network interface is made active
  1493. * @netdev: network interface device structure
  1494. *
  1495. * Returns 0 on success, negative value on failure
  1496. *
  1497. * The open entry point is called when a network interface is made
  1498. * active by the system (IFF_UP). At this point all resources needed
  1499. * for transmit and receive operations are allocated, the interrupt
  1500. * handler is registered with the OS, the watchdog timer is started,
  1501. * and the stack is notified that the interface is ready.
  1502. */
  1503. static int ena_open(struct net_device *netdev)
  1504. {
  1505. struct ena_adapter *adapter = netdev_priv(netdev);
  1506. int rc;
  1507. /* Notify the stack of the actual queue counts. */
  1508. rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
  1509. if (rc) {
  1510. netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
  1511. return rc;
  1512. }
  1513. rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
  1514. if (rc) {
  1515. netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
  1516. return rc;
  1517. }
  1518. rc = ena_up(adapter);
  1519. if (rc)
  1520. return rc;
  1521. return rc;
  1522. }
  1523. /* ena_close - Disables a network interface
  1524. * @netdev: network interface device structure
  1525. *
  1526. * Returns 0, this is not allowed to fail
  1527. *
1528. * The close entry point is called when an interface is de-activated
1529. * by the OS. The hardware is still under the driver's control, but
1530. * needs to be disabled. All transmit and receive resources are freed,
1531. * and the device is reset and restored if an error state is detected.
  1532. */
  1533. static int ena_close(struct net_device *netdev)
  1534. {
  1535. struct ena_adapter *adapter = netdev_priv(netdev);
  1536. netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
  1537. if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  1538. ena_down(adapter);
1539. /* Check for device status and issue reset if needed */
  1540. check_for_admin_com_state(adapter);
  1541. if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
  1542. netif_err(adapter, ifdown, adapter->netdev,
  1543. "Destroy failure, restarting device\n");
  1544. ena_dump_stats_to_dmesg(adapter);
  1545. /* rtnl lock already obtained in dev_ioctl() layer */
  1546. ena_destroy_device(adapter);
  1547. ena_restore_device(adapter);
  1548. }
  1549. return 0;
  1550. }
  1551. static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
  1552. {
  1553. u32 mss = skb_shinfo(skb)->gso_size;
  1554. struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
  1555. u8 l4_protocol = 0;
  1556. if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
  1557. ena_tx_ctx->l4_csum_enable = 1;
  1558. if (mss) {
  1559. ena_tx_ctx->tso_enable = 1;
  1560. ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
  1561. ena_tx_ctx->l4_csum_partial = 0;
  1562. } else {
  1563. ena_tx_ctx->tso_enable = 0;
  1564. ena_meta->l4_hdr_len = 0;
  1565. ena_tx_ctx->l4_csum_partial = 1;
  1566. }
  1567. switch (ip_hdr(skb)->version) {
  1568. case IPVERSION:
  1569. ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
  1570. if (ip_hdr(skb)->frag_off & htons(IP_DF))
  1571. ena_tx_ctx->df = 1;
  1572. if (mss)
  1573. ena_tx_ctx->l3_csum_enable = 1;
  1574. l4_protocol = ip_hdr(skb)->protocol;
  1575. break;
  1576. case 6:
  1577. ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
  1578. l4_protocol = ipv6_hdr(skb)->nexthdr;
  1579. break;
  1580. default:
  1581. break;
  1582. }
  1583. if (l4_protocol == IPPROTO_TCP)
  1584. ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
  1585. else
  1586. ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
  1587. ena_meta->mss = mss;
  1588. ena_meta->l3_hdr_len = skb_network_header_len(skb);
  1589. ena_meta->l3_hdr_offset = skb_network_offset(skb);
  1590. ena_tx_ctx->meta_valid = 1;
  1591. } else {
  1592. ena_tx_ctx->meta_valid = 0;
  1593. }
  1594. }
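/* Offload summary, derived from the checks above: TSO packets
 * (gso_size != 0) get tso_enable, the TCP data offset (tcp_hdr(skb)->doff,
 * counted in 32-bit words) and, for IPv4, L3 checksum offload as well;
 * plain CHECKSUM_PARTIAL packets only get l4_csum_partial. Anything else
 * leaves meta_valid at 0.
 */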
  1595. static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
  1596. struct sk_buff *skb)
  1597. {
  1598. int num_frags, header_len, rc;
  1599. num_frags = skb_shinfo(skb)->nr_frags;
  1600. header_len = skb_headlen(skb);
  1601. if (num_frags < tx_ring->sgl_size)
  1602. return 0;
  1603. if ((num_frags == tx_ring->sgl_size) &&
  1604. (header_len < tx_ring->tx_max_header_size))
  1605. return 0;
  1606. u64_stats_update_begin(&tx_ring->syncp);
  1607. tx_ring->tx_stats.linearize++;
  1608. u64_stats_update_end(&tx_ring->syncp);
  1609. rc = skb_linearize(skb);
  1610. if (unlikely(rc)) {
  1611. u64_stats_update_begin(&tx_ring->syncp);
  1612. tx_ring->tx_stats.linearize_failed++;
  1613. u64_stats_update_end(&tx_ring->syncp);
  1614. }
  1615. return rc;
  1616. }
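/* Illustration (hypothetical numbers): if tx_ring->sgl_size were 17, an skb
 * with 17 fragments whose linear part does not fit in tx_max_header_size
 * would need more buffers than one Tx descriptor chain can describe, so it
 * is linearized into a single contiguous buffer. skb_linearize() may fail
 * under memory pressure, which the linearize_failed counter tracks.
 */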
  1617. /* Called with netif_tx_lock. */
  1618. static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1619. {
  1620. struct ena_adapter *adapter = netdev_priv(dev);
  1621. struct ena_tx_buffer *tx_info;
  1622. struct ena_com_tx_ctx ena_tx_ctx;
  1623. struct ena_ring *tx_ring;
  1624. struct netdev_queue *txq;
  1625. struct ena_com_buf *ena_buf;
  1626. void *push_hdr;
  1627. u32 len, last_frag;
  1628. u16 next_to_use;
  1629. u16 req_id;
  1630. u16 push_len;
  1631. u16 header_len;
  1632. dma_addr_t dma;
  1633. int qid, rc, nb_hw_desc;
  1634. int i = -1;
  1635. netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
  1636. /* Determine which tx ring we will be placed on */
  1637. qid = skb_get_queue_mapping(skb);
  1638. tx_ring = &adapter->tx_ring[qid];
  1639. txq = netdev_get_tx_queue(dev, qid);
  1640. rc = ena_check_and_linearize_skb(tx_ring, skb);
  1641. if (unlikely(rc))
  1642. goto error_drop_packet;
  1643. skb_tx_timestamp(skb);
  1644. len = skb_headlen(skb);
  1645. next_to_use = tx_ring->next_to_use;
  1646. req_id = tx_ring->free_tx_ids[next_to_use];
  1647. tx_info = &tx_ring->tx_buffer_info[req_id];
  1648. tx_info->num_of_bufs = 0;
  1649. WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
  1650. ena_buf = tx_info->bufs;
  1651. tx_info->skb = skb;
  1652. if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1653. /* prepare the push buffer */
  1654. push_len = min_t(u32, len, tx_ring->tx_max_header_size);
  1655. header_len = push_len;
  1656. push_hdr = skb->data;
  1657. } else {
  1658. push_len = 0;
  1659. header_len = min_t(u32, len, tx_ring->tx_max_header_size);
  1660. push_hdr = NULL;
  1661. }
  1662. netif_dbg(adapter, tx_queued, dev,
  1663. "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
  1664. push_hdr, push_len);
  1665. if (len > push_len) {
  1666. dma = dma_map_single(tx_ring->dev, skb->data + push_len,
  1667. len - push_len, DMA_TO_DEVICE);
  1668. if (dma_mapping_error(tx_ring->dev, dma))
  1669. goto error_report_dma_error;
  1670. ena_buf->paddr = dma;
  1671. ena_buf->len = len - push_len;
  1672. ena_buf++;
  1673. tx_info->num_of_bufs++;
  1674. }
  1675. last_frag = skb_shinfo(skb)->nr_frags;
  1676. for (i = 0; i < last_frag; i++) {
  1677. const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1678. len = skb_frag_size(frag);
  1679. dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
  1680. DMA_TO_DEVICE);
  1681. if (dma_mapping_error(tx_ring->dev, dma))
  1682. goto error_report_dma_error;
  1683. ena_buf->paddr = dma;
  1684. ena_buf->len = len;
  1685. ena_buf++;
  1686. }
  1687. tx_info->num_of_bufs += last_frag;
  1688. memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
  1689. ena_tx_ctx.ena_bufs = tx_info->bufs;
  1690. ena_tx_ctx.push_header = push_hdr;
  1691. ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
  1692. ena_tx_ctx.req_id = req_id;
  1693. ena_tx_ctx.header_len = header_len;
  1694. /* set flags and meta data */
  1695. ena_tx_csum(&ena_tx_ctx, skb);
1696. /* prepare the packet's descriptors for the dma engine */
  1697. rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
  1698. &nb_hw_desc);
  1699. if (unlikely(rc)) {
  1700. netif_err(adapter, tx_queued, dev,
  1701. "failed to prepare tx bufs\n");
  1702. u64_stats_update_begin(&tx_ring->syncp);
  1703. tx_ring->tx_stats.queue_stop++;
  1704. tx_ring->tx_stats.prepare_ctx_err++;
  1705. u64_stats_update_end(&tx_ring->syncp);
  1706. netif_tx_stop_queue(txq);
  1707. goto error_unmap_dma;
  1708. }
  1709. netdev_tx_sent_queue(txq, skb->len);
  1710. u64_stats_update_begin(&tx_ring->syncp);
  1711. tx_ring->tx_stats.cnt++;
  1712. tx_ring->tx_stats.bytes += skb->len;
  1713. u64_stats_update_end(&tx_ring->syncp);
  1714. tx_info->tx_descs = nb_hw_desc;
  1715. tx_info->last_jiffies = jiffies;
  1716. tx_info->print_once = 0;
  1717. tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
  1718. tx_ring->ring_size);
1719. /* This WMB is aimed to:
1720. * 1 - perform an smp barrier before reading next_to_completion
1721. * 2 - make sure the descriptors are written before triggering the doorbell
1722. */
  1723. wmb();
1724. /* stop the queue when no more space is available; the packet can need up
1725. * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
1726. * header (if the header is larger than tx_max_header_size).
1727. */
  1728. if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
  1729. (tx_ring->sgl_size + 2))) {
  1730. netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
  1731. __func__, qid);
  1732. netif_tx_stop_queue(txq);
  1733. u64_stats_update_begin(&tx_ring->syncp);
  1734. tx_ring->tx_stats.queue_stop++;
  1735. u64_stats_update_end(&tx_ring->syncp);
1736. /* There is a rare condition where this function decides to
1737. * stop the queue but meanwhile clean_tx_irq updates
1738. * next_to_completion and terminates.
1739. * The queue would then remain stopped forever.
1740. * To solve this issue this function performs an rmb, checks
1741. * the wakeup condition and wakes up the queue if needed.
1742. */
  1743. smp_rmb();
  1744. if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
  1745. > ENA_TX_WAKEUP_THRESH) {
  1746. netif_tx_wake_queue(txq);
  1747. u64_stats_update_begin(&tx_ring->syncp);
  1748. tx_ring->tx_stats.queue_wakeup++;
  1749. u64_stats_update_end(&tx_ring->syncp);
  1750. }
  1751. }
  1752. if (netif_xmit_stopped(txq) || !skb->xmit_more) {
  1753. /* trigger the dma engine */
  1754. ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
  1755. u64_stats_update_begin(&tx_ring->syncp);
  1756. tx_ring->tx_stats.doorbells++;
  1757. u64_stats_update_end(&tx_ring->syncp);
  1758. }
  1759. return NETDEV_TX_OK;
  1760. error_report_dma_error:
  1761. u64_stats_update_begin(&tx_ring->syncp);
  1762. tx_ring->tx_stats.dma_mapping_err++;
  1763. u64_stats_update_end(&tx_ring->syncp);
  1764. netdev_warn(adapter->netdev, "failed to map skb\n");
  1765. tx_info->skb = NULL;
  1766. error_unmap_dma:
  1767. if (i >= 0) {
  1768. /* save value of frag that failed */
  1769. last_frag = i;
  1770. /* start back at beginning and unmap skb */
  1771. tx_info->skb = NULL;
  1772. ena_buf = tx_info->bufs;
  1773. dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
  1774. dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
  1775. /* unmap remaining mapped pages */
  1776. for (i = 0; i < last_frag; i++) {
  1777. ena_buf++;
  1778. dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
  1779. dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
  1780. }
  1781. }
  1782. error_drop_packet:
  1783. dev_kfree_skb(skb);
  1784. return NETDEV_TX_OK;
  1785. }
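/* Doorbell batching: the doorbell above is rung only when the queue was
 * just stopped or skb->xmit_more is clear, so a burst of packets handed
 * over by the stack results in a single doorbell write rather than one per
 * packet.
 */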
  1786. #ifdef CONFIG_NET_POLL_CONTROLLER
  1787. static void ena_netpoll(struct net_device *netdev)
  1788. {
  1789. struct ena_adapter *adapter = netdev_priv(netdev);
  1790. int i;
1791. /* Don't schedule NAPI if the driver is in the middle of a reset
1792. * or the netdev is down.
1793. */
  1794. if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
  1795. test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
  1796. return;
  1797. for (i = 0; i < adapter->num_queues; i++)
  1798. napi_schedule(&adapter->ena_napi[i].napi);
  1799. }
  1800. #endif /* CONFIG_NET_POLL_CONTROLLER */
  1801. static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
  1802. void *accel_priv, select_queue_fallback_t fallback)
  1803. {
  1804. u16 qid;
1805. /* we suspect that this is good for in-kernel network services that
1806. * want to loop incoming skb rx to tx; for normal user-generated traffic
1807. * we will most probably not get here
1808. */
  1809. if (skb_rx_queue_recorded(skb))
  1810. qid = skb_get_rx_queue(skb);
  1811. else
  1812. qid = fallback(dev, skb);
  1813. return qid;
  1814. }
  1815. static void ena_config_host_info(struct ena_com_dev *ena_dev)
  1816. {
  1817. struct ena_admin_host_info *host_info;
  1818. int rc;
  1819. /* Allocate only the host info */
  1820. rc = ena_com_allocate_host_info(ena_dev);
  1821. if (rc) {
  1822. pr_err("Cannot allocate host info\n");
  1823. return;
  1824. }
  1825. host_info = ena_dev->host_attr.host_info;
  1826. host_info->os_type = ENA_ADMIN_OS_LINUX;
  1827. host_info->kernel_ver = LINUX_VERSION_CODE;
  1828. strncpy(host_info->kernel_ver_str, utsname()->version,
  1829. sizeof(host_info->kernel_ver_str) - 1);
  1830. host_info->os_dist = 0;
  1831. strncpy(host_info->os_dist_str, utsname()->release,
  1832. sizeof(host_info->os_dist_str) - 1);
  1833. host_info->driver_version =
  1834. (DRV_MODULE_VER_MAJOR) |
  1835. (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
  1836. (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
  1837. rc = ena_com_set_host_attributes(ena_dev);
  1838. if (rc) {
  1839. if (rc == -EOPNOTSUPP)
  1840. pr_warn("Cannot set host attributes\n");
  1841. else
  1842. pr_err("Cannot set host attributes\n");
  1843. goto err;
  1844. }
  1845. return;
  1846. err:
  1847. ena_com_delete_host_info(ena_dev);
  1848. }
  1849. static void ena_config_debug_area(struct ena_adapter *adapter)
  1850. {
  1851. u32 debug_area_size;
  1852. int rc, ss_count;
  1853. ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
  1854. if (ss_count <= 0) {
  1855. netif_err(adapter, drv, adapter->netdev,
  1856. "SS count is negative\n");
  1857. return;
  1858. }
1859. /* allocate 32 bytes for each stat string and 64 bits for each value */
  1860. debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
  1861. rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
  1862. if (rc) {
  1863. pr_err("Cannot allocate debug area\n");
  1864. return;
  1865. }
  1866. rc = ena_com_set_host_attributes(adapter->ena_dev);
  1867. if (rc) {
  1868. if (rc == -EOPNOTSUPP)
  1869. netif_warn(adapter, drv, adapter->netdev,
  1870. "Cannot set host attributes\n");
  1871. else
  1872. netif_err(adapter, drv, adapter->netdev,
  1873. "Cannot set host attributes\n");
  1874. goto err;
  1875. }
  1876. return;
  1877. err:
  1878. ena_com_delete_debug_area(adapter->ena_dev);
  1879. }
  1880. static void ena_get_stats64(struct net_device *netdev,
  1881. struct rtnl_link_stats64 *stats)
  1882. {
  1883. struct ena_adapter *adapter = netdev_priv(netdev);
  1884. struct ena_ring *rx_ring, *tx_ring;
  1885. unsigned int start;
  1886. u64 rx_drops;
  1887. int i;
  1888. if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  1889. return;
  1890. for (i = 0; i < adapter->num_queues; i++) {
  1891. u64 bytes, packets;
  1892. tx_ring = &adapter->tx_ring[i];
  1893. do {
  1894. start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
  1895. packets = tx_ring->tx_stats.cnt;
  1896. bytes = tx_ring->tx_stats.bytes;
  1897. } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
  1898. stats->tx_packets += packets;
  1899. stats->tx_bytes += bytes;
  1900. rx_ring = &adapter->rx_ring[i];
  1901. do {
  1902. start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
  1903. packets = rx_ring->rx_stats.cnt;
  1904. bytes = rx_ring->rx_stats.bytes;
  1905. } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
  1906. stats->rx_packets += packets;
  1907. stats->rx_bytes += bytes;
  1908. }
  1909. do {
  1910. start = u64_stats_fetch_begin_irq(&adapter->syncp);
  1911. rx_drops = adapter->dev_stats.rx_drops;
  1912. } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
  1913. stats->rx_dropped = rx_drops;
  1914. stats->multicast = 0;
  1915. stats->collisions = 0;
  1916. stats->rx_length_errors = 0;
  1917. stats->rx_crc_errors = 0;
  1918. stats->rx_frame_errors = 0;
  1919. stats->rx_fifo_errors = 0;
  1920. stats->rx_missed_errors = 0;
  1921. stats->tx_window_errors = 0;
  1922. stats->rx_errors = 0;
  1923. stats->tx_errors = 0;
  1924. }
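/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops above
 * re-read a ring's counters if a writer touched the corresponding syncp in
 * the middle of the read; on 64-bit kernels this seqcount typically
 * compiles away to plain loads.
 */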
  1925. static const struct net_device_ops ena_netdev_ops = {
  1926. .ndo_open = ena_open,
  1927. .ndo_stop = ena_close,
  1928. .ndo_start_xmit = ena_start_xmit,
  1929. .ndo_select_queue = ena_select_queue,
  1930. .ndo_get_stats64 = ena_get_stats64,
  1931. .ndo_tx_timeout = ena_tx_timeout,
  1932. .ndo_change_mtu = ena_change_mtu,
  1933. .ndo_set_mac_address = NULL,
  1934. .ndo_validate_addr = eth_validate_addr,
  1935. #ifdef CONFIG_NET_POLL_CONTROLLER
  1936. .ndo_poll_controller = ena_netpoll,
  1937. #endif /* CONFIG_NET_POLL_CONTROLLER */
  1938. };
  1939. static int ena_device_validate_params(struct ena_adapter *adapter,
  1940. struct ena_com_dev_get_features_ctx *get_feat_ctx)
  1941. {
  1942. struct net_device *netdev = adapter->netdev;
  1943. int rc;
  1944. rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
  1945. adapter->mac_addr);
  1946. if (!rc) {
  1947. netif_err(adapter, drv, netdev,
  1948. "Error, mac address are different\n");
  1949. return -EINVAL;
  1950. }
  1951. if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
  1952. (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
  1953. netif_err(adapter, drv, netdev,
  1954. "Error, device doesn't support enough queues\n");
  1955. return -EINVAL;
  1956. }
  1957. if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
  1958. netif_err(adapter, drv, netdev,
  1959. "Error, device max mtu is smaller than netdev MTU\n");
  1960. return -EINVAL;
  1961. }
  1962. return 0;
  1963. }
  1964. static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
  1965. struct ena_com_dev_get_features_ctx *get_feat_ctx,
  1966. bool *wd_state)
  1967. {
  1968. struct device *dev = &pdev->dev;
  1969. bool readless_supported;
  1970. u32 aenq_groups;
  1971. int dma_width;
  1972. int rc;
  1973. rc = ena_com_mmio_reg_read_request_init(ena_dev);
  1974. if (rc) {
  1975. dev_err(dev, "failed to init mmio read less\n");
  1976. return rc;
  1977. }
1978. /* The PCIe configuration space revision id indicates whether mmio reg
1979. * read is disabled
1980. */
  1981. readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
  1982. ena_com_set_mmio_read_mode(ena_dev, readless_supported);
  1983. rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
  1984. if (rc) {
  1985. dev_err(dev, "Can not reset device\n");
  1986. goto err_mmio_read_less;
  1987. }
  1988. rc = ena_com_validate_version(ena_dev);
  1989. if (rc) {
  1990. dev_err(dev, "device version is too low\n");
  1991. goto err_mmio_read_less;
  1992. }
  1993. dma_width = ena_com_get_dma_width(ena_dev);
  1994. if (dma_width < 0) {
  1995. dev_err(dev, "Invalid dma width value %d", dma_width);
  1996. rc = dma_width;
  1997. goto err_mmio_read_less;
  1998. }
  1999. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
  2000. if (rc) {
  2001. dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
  2002. goto err_mmio_read_less;
  2003. }
  2004. rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
  2005. if (rc) {
  2006. dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
  2007. rc);
  2008. goto err_mmio_read_less;
  2009. }
  2010. /* ENA admin level init */
  2011. rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
  2012. if (rc) {
  2013. dev_err(dev,
  2014. "Can not initialize ena admin queue with device\n");
  2015. goto err_mmio_read_less;
  2016. }
2017. /* To enable the msix interrupts the driver needs to know the number
2018. * of queues, so it uses polling mode to retrieve this
2019. * information
2020. */
  2021. ena_com_set_admin_polling_mode(ena_dev, true);
  2022. ena_config_host_info(ena_dev);
2023. /* Get Device Attributes */
  2024. rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
  2025. if (rc) {
  2026. dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
  2027. goto err_admin_init;
  2028. }
2029. /* Try to turn on all the available aenq groups */
  2030. aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
  2031. BIT(ENA_ADMIN_FATAL_ERROR) |
  2032. BIT(ENA_ADMIN_WARNING) |
  2033. BIT(ENA_ADMIN_NOTIFICATION) |
  2034. BIT(ENA_ADMIN_KEEP_ALIVE);
  2035. aenq_groups &= get_feat_ctx->aenq.supported_groups;
  2036. rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
  2037. if (rc) {
  2038. dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
  2039. goto err_admin_init;
  2040. }
  2041. *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
  2042. return 0;
  2043. err_admin_init:
  2044. ena_com_delete_host_info(ena_dev);
  2045. ena_com_admin_destroy(ena_dev);
  2046. err_mmio_read_less:
  2047. ena_com_mmio_reg_read_request_destroy(ena_dev);
  2048. return rc;
  2049. }
  2050. static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
  2051. int io_vectors)
  2052. {
  2053. struct ena_com_dev *ena_dev = adapter->ena_dev;
  2054. struct device *dev = &adapter->pdev->dev;
  2055. int rc;
  2056. rc = ena_enable_msix(adapter, io_vectors);
  2057. if (rc) {
  2058. dev_err(dev, "Can not reserve msix vectors\n");
  2059. return rc;
  2060. }
  2061. ena_setup_mgmnt_intr(adapter);
  2062. rc = ena_request_mgmnt_irq(adapter);
  2063. if (rc) {
  2064. dev_err(dev, "Can not setup management interrupts\n");
  2065. goto err_disable_msix;
  2066. }
  2067. ena_com_set_admin_polling_mode(ena_dev, false);
  2068. ena_com_admin_aenq_enable(ena_dev);
  2069. return 0;
  2070. err_disable_msix:
  2071. ena_disable_msix(adapter);
  2072. return rc;
  2073. }
  2074. static void ena_destroy_device(struct ena_adapter *adapter)
  2075. {
  2076. struct net_device *netdev = adapter->netdev;
  2077. struct ena_com_dev *ena_dev = adapter->ena_dev;
  2078. bool dev_up;
  2079. netif_carrier_off(netdev);
  2080. del_timer_sync(&adapter->timer_service);
  2081. dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
  2082. adapter->dev_up_before_reset = dev_up;
  2083. ena_com_set_admin_running_state(ena_dev, false);
  2084. if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  2085. ena_down(adapter);
2086. /* Before releasing the ENA resources, a device reset is required
2087. * (to prevent the device from accessing them).
2088. * In case the reset flag is set and the device is up, ena_down()
2089. * already performs the reset, so it can be skipped.
2090. */
  2091. if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
  2092. ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
  2093. ena_free_mgmnt_irq(adapter);
  2094. ena_disable_msix(adapter);
  2095. ena_com_abort_admin_commands(ena_dev);
  2096. ena_com_wait_for_abort_completion(ena_dev);
  2097. ena_com_admin_destroy(ena_dev);
  2098. ena_com_mmio_reg_read_request_destroy(ena_dev);
  2099. adapter->reset_reason = ENA_REGS_RESET_NORMAL;
  2100. clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2101. }
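/* Teardown ordering note: the management IRQ is freed and MSI-X is disabled
 * before the outstanding admin commands are aborted and the admin queue is
 * destroyed, so the tail of the teardown should not be interrupted by an
 * admin completion.
 */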
  2102. static int ena_restore_device(struct ena_adapter *adapter)
  2103. {
  2104. struct ena_com_dev_get_features_ctx get_feat_ctx;
  2105. struct ena_com_dev *ena_dev = adapter->ena_dev;
  2106. struct pci_dev *pdev = adapter->pdev;
  2107. bool wd_state;
  2108. int rc;
  2109. set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
  2110. rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
  2111. if (rc) {
  2112. dev_err(&pdev->dev, "Can not initialize device\n");
  2113. goto err;
  2114. }
  2115. adapter->wd_state = wd_state;
  2116. rc = ena_device_validate_params(adapter, &get_feat_ctx);
  2117. if (rc) {
  2118. dev_err(&pdev->dev, "Validation of device parameters failed\n");
  2119. goto err_device_destroy;
  2120. }
  2121. clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2122. /* Make sure we don't have a race with the AENQ link state handler */
  2123. if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
  2124. netif_carrier_on(adapter->netdev);
  2125. rc = ena_enable_msix_and_set_admin_interrupts(adapter,
  2126. adapter->num_queues);
  2127. if (rc) {
  2128. dev_err(&pdev->dev, "Enable MSI-X failed\n");
  2129. goto err_device_destroy;
  2130. }
2131. /* If the interface was up before the reset, bring it up */
  2132. if (adapter->dev_up_before_reset) {
  2133. rc = ena_up(adapter);
  2134. if (rc) {
  2135. dev_err(&pdev->dev, "Failed to create I/O queues\n");
  2136. goto err_disable_msix;
  2137. }
  2138. }
  2139. mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
  2140. dev_err(&pdev->dev, "Device reset completed successfully\n");
  2141. return rc;
  2142. err_disable_msix:
  2143. ena_free_mgmnt_irq(adapter);
  2144. ena_disable_msix(adapter);
  2145. err_device_destroy:
  2146. ena_com_admin_destroy(ena_dev);
  2147. err:
  2148. clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
  2149. clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
  2150. dev_err(&pdev->dev,
  2151. "Reset attempt failed. Can not reset the device\n");
  2152. return rc;
  2153. }
  2154. static void ena_fw_reset_device(struct work_struct *work)
  2155. {
  2156. struct ena_adapter *adapter =
  2157. container_of(work, struct ena_adapter, reset_task);
  2158. struct pci_dev *pdev = adapter->pdev;
  2159. if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
  2160. dev_err(&pdev->dev,
  2161. "device reset schedule while reset bit is off\n");
  2162. return;
  2163. }
  2164. rtnl_lock();
  2165. ena_destroy_device(adapter);
  2166. ena_restore_device(adapter);
  2167. rtnl_unlock();
  2168. }
  2169. static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
  2170. struct ena_ring *rx_ring)
  2171. {
  2172. if (likely(rx_ring->first_interrupt))
  2173. return 0;
  2174. if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
  2175. return 0;
  2176. rx_ring->no_interrupt_event_cnt++;
  2177. if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
  2178. netif_err(adapter, rx_err, adapter->netdev,
  2179. "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
  2180. rx_ring->qid);
  2181. adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
  2182. smp_mb__before_atomic();
  2183. set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2184. return -EIO;
  2185. }
  2186. return 0;
  2187. }
  2188. static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
  2189. struct ena_ring *tx_ring)
  2190. {
  2191. struct ena_tx_buffer *tx_buf;
  2192. unsigned long last_jiffies;
  2193. u32 missed_tx = 0;
  2194. int i, rc = 0;
  2195. for (i = 0; i < tx_ring->ring_size; i++) {
  2196. tx_buf = &tx_ring->tx_buffer_info[i];
  2197. last_jiffies = tx_buf->last_jiffies;
  2198. if (last_jiffies == 0)
  2199. /* no pending Tx at this location */
  2200. continue;
  2201. if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
  2202. 2 * adapter->missing_tx_completion_to))) {
2203. /* If after the grace period the interrupt is still not
2204. * received, we schedule a reset
2205. */
  2206. netif_err(adapter, tx_err, adapter->netdev,
  2207. "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
  2208. tx_ring->qid);
  2209. adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
  2210. smp_mb__before_atomic();
  2211. set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2212. return -EIO;
  2213. }
  2214. if (unlikely(time_is_before_jiffies(last_jiffies +
  2215. adapter->missing_tx_completion_to))) {
  2216. if (!tx_buf->print_once)
  2217. netif_notice(adapter, tx_err, adapter->netdev,
  2218. "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
  2219. tx_ring->qid, i);
  2220. tx_buf->print_once = 1;
  2221. missed_tx++;
  2222. }
  2223. }
  2224. if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
  2225. netif_err(adapter, tx_err, adapter->netdev,
  2226. "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
  2227. missed_tx,
  2228. adapter->missing_tx_completion_threshold);
  2229. adapter->reset_reason =
  2230. ENA_REGS_RESET_MISS_TX_CMPL;
  2231. set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2232. rc = -EIO;
  2233. }
  2234. u64_stats_update_begin(&tx_ring->syncp);
  2235. tx_ring->tx_stats.missed_tx = missed_tx;
  2236. u64_stats_update_end(&tx_ring->syncp);
  2237. return rc;
  2238. }
  2239. static void check_for_missing_completions(struct ena_adapter *adapter)
  2240. {
  2241. struct ena_ring *tx_ring;
  2242. struct ena_ring *rx_ring;
  2243. int i, budget, rc;
2244. /* Make sure the driver isn't racing with another context turning the device off */
  2245. smp_rmb();
  2246. if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  2247. return;
  2248. if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
  2249. return;
  2250. if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
  2251. return;
  2252. budget = ENA_MONITORED_TX_QUEUES;
  2253. for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
  2254. tx_ring = &adapter->tx_ring[i];
  2255. rx_ring = &adapter->rx_ring[i];
  2256. rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
  2257. if (unlikely(rc))
  2258. return;
  2259. rc = check_for_rx_interrupt_queue(adapter, rx_ring);
  2260. if (unlikely(rc))
  2261. return;
  2262. budget--;
  2263. if (!budget)
  2264. break;
  2265. }
  2266. adapter->last_monitored_tx_qid = i % adapter->num_queues;
  2267. }
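/* Budget illustration: with ENA_MONITORED_TX_QUEUES queues checked per
 * timer tick and last_monitored_tx_qid advancing each run, a device with,
 * say, 32 queues is scanned over several ticks rather than all at once,
 * keeping the timer callback short.
 */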
  2268. /* trigger napi schedule after 2 consecutive detections */
  2269. #define EMPTY_RX_REFILL 2
  2270. /* For the rare case where the device runs out of Rx descriptors and the
  2271. * napi handler failed to refill new Rx descriptors (due to a lack of memory
  2272. * for example).
  2273. * This case will lead to a deadlock:
  2274. * The device won't send interrupts since all the new Rx packets will be dropped
2275. * The napi handler won't allocate new Rx descriptors so the device won't be
2276. * able to pass new packets up to the host.
  2277. *
  2278. * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2279. * It is recommended to have at least 512MB, with a minimum of 128MB for
2280. * constrained environments.
  2281. *
  2282. * When such a situation is detected - Reschedule napi
  2283. */
  2284. static void check_for_empty_rx_ring(struct ena_adapter *adapter)
  2285. {
  2286. struct ena_ring *rx_ring;
  2287. int i, refill_required;
  2288. if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  2289. return;
  2290. if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
  2291. return;
  2292. for (i = 0; i < adapter->num_queues; i++) {
  2293. rx_ring = &adapter->rx_ring[i];
  2294. refill_required =
  2295. ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
  2296. if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
  2297. rx_ring->empty_rx_queue++;
  2298. if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
  2299. u64_stats_update_begin(&rx_ring->syncp);
  2300. rx_ring->rx_stats.empty_rx_ring++;
  2301. u64_stats_update_end(&rx_ring->syncp);
  2302. netif_err(adapter, drv, adapter->netdev,
  2303. "trigger refill for ring %d\n", i);
  2304. napi_schedule(rx_ring->napi);
  2305. rx_ring->empty_rx_queue = 0;
  2306. }
  2307. } else {
  2308. rx_ring->empty_rx_queue = 0;
  2309. }
  2310. }
  2311. }
  2312. /* Check for keep alive expiration */
  2313. static void check_for_missing_keep_alive(struct ena_adapter *adapter)
  2314. {
  2315. unsigned long keep_alive_expired;
  2316. if (!adapter->wd_state)
  2317. return;
  2318. if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
  2319. return;
  2320. keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
  2321. adapter->keep_alive_timeout);
  2322. if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
  2323. netif_err(adapter, drv, adapter->netdev,
  2324. "Keep alive watchdog timeout.\n");
  2325. u64_stats_update_begin(&adapter->syncp);
  2326. adapter->dev_stats.wd_expired++;
  2327. u64_stats_update_end(&adapter->syncp);
  2328. adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
  2329. set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2330. }
  2331. }
  2332. static void check_for_admin_com_state(struct ena_adapter *adapter)
  2333. {
  2334. if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
  2335. netif_err(adapter, drv, adapter->netdev,
  2336. "ENA admin queue is not in running state!\n");
  2337. u64_stats_update_begin(&adapter->syncp);
  2338. adapter->dev_stats.admin_q_pause++;
  2339. u64_stats_update_end(&adapter->syncp);
  2340. adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
  2341. set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
  2342. }
  2343. }
  2344. static void ena_update_hints(struct ena_adapter *adapter,
  2345. struct ena_admin_ena_hw_hints *hints)
  2346. {
  2347. struct net_device *netdev = adapter->netdev;
  2348. if (hints->admin_completion_tx_timeout)
  2349. adapter->ena_dev->admin_queue.completion_timeout =
  2350. hints->admin_completion_tx_timeout * 1000;
  2351. if (hints->mmio_read_timeout)
  2352. /* convert to usec */
  2353. adapter->ena_dev->mmio_read.reg_read_to =
  2354. hints->mmio_read_timeout * 1000;
  2355. if (hints->missed_tx_completion_count_threshold_to_reset)
  2356. adapter->missing_tx_completion_threshold =
  2357. hints->missed_tx_completion_count_threshold_to_reset;
  2358. if (hints->missing_tx_completion_timeout) {
  2359. if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
  2360. adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
  2361. else
  2362. adapter->missing_tx_completion_to =
  2363. msecs_to_jiffies(hints->missing_tx_completion_timeout);
  2364. }
  2365. if (hints->netdev_wd_timeout)
  2366. netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
  2367. if (hints->driver_watchdog_timeout) {
  2368. if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
  2369. adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
  2370. else
  2371. adapter->keep_alive_timeout =
  2372. msecs_to_jiffies(hints->driver_watchdog_timeout);
  2373. }
  2374. }
  2375. static void ena_update_host_info(struct ena_admin_host_info *host_info,
  2376. struct net_device *netdev)
  2377. {
  2378. host_info->supported_network_features[0] =
  2379. netdev->features & GENMASK_ULL(31, 0);
  2380. host_info->supported_network_features[1] =
  2381. (netdev->features & GENMASK_ULL(63, 32)) >> 32;
  2382. }
  2383. static void ena_timer_service(struct timer_list *t)
  2384. {
  2385. struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
  2386. u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
  2387. struct ena_admin_host_info *host_info =
  2388. adapter->ena_dev->host_attr.host_info;
  2389. check_for_missing_keep_alive(adapter);
  2390. check_for_admin_com_state(adapter);
  2391. check_for_missing_completions(adapter);
  2392. check_for_empty_rx_ring(adapter);
  2393. if (debug_area)
  2394. ena_dump_stats_to_buf(adapter, debug_area);
  2395. if (host_info)
  2396. ena_update_host_info(host_info, adapter->netdev);
  2397. if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
  2398. netif_err(adapter, drv, adapter->netdev,
  2399. "Trigger reset is on\n");
  2400. ena_dump_stats_to_dmesg(adapter);
  2401. queue_work(ena_wq, &adapter->reset_task);
  2402. return;
  2403. }
  2404. /* Reset the timer */
  2405. mod_timer(&adapter->timer_service, jiffies + HZ);
  2406. }
  2407. static int ena_calc_io_queue_num(struct pci_dev *pdev,
  2408. struct ena_com_dev *ena_dev,
  2409. struct ena_com_dev_get_features_ctx *get_feat_ctx)
  2410. {
  2411. int io_sq_num, io_queue_num;
  2412. /* In case of LLQ use the llq number in the get feature cmd */
  2413. if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
  2414. io_sq_num = get_feat_ctx->max_queues.max_llq_num;
  2415. if (io_sq_num == 0) {
  2416. dev_err(&pdev->dev,
  2417. "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
  2418. ena_dev->tx_mem_queue_type =
  2419. ENA_ADMIN_PLACEMENT_POLICY_HOST;
  2420. io_sq_num = get_feat_ctx->max_queues.max_sq_num;
  2421. }
  2422. } else {
  2423. io_sq_num = get_feat_ctx->max_queues.max_sq_num;
  2424. }
  2425. io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
  2426. io_queue_num = min_t(int, io_queue_num, io_sq_num);
  2427. io_queue_num = min_t(int, io_queue_num,
  2428. get_feat_ctx->max_queues.max_cq_num);
2429. /* 1 IRQ for mgmnt and 1 IRQ per IO queue pair */
  2430. io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
  2431. if (unlikely(!io_queue_num)) {
  2432. dev_err(&pdev->dev, "The device doesn't have io queues\n");
  2433. return -EFAULT;
  2434. }
  2435. return io_queue_num;
  2436. }
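/* Worked example (illustrative numbers, assuming ENA_MAX_NUM_IO_QUEUES is
 * at least 8): with 8 online CPUs, max_sq_num = 32, max_cq_num = 32 and
 * pci_msix_vec_count() returning 9, the result is
 * min(8, 32, 32, 9 - 1) = 8 I/O queues.
 */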
  2437. static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
  2438. struct ena_com_dev_get_features_ctx *get_feat_ctx)
  2439. {
  2440. bool has_mem_bar;
  2441. has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
  2442. /* Enable push mode if device supports LLQ */
  2443. if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
  2444. ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
  2445. else
  2446. ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
  2447. }
  2448. static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
  2449. struct net_device *netdev)
  2450. {
  2451. netdev_features_t dev_features = 0;
  2452. /* Set offload features */
  2453. if (feat->offload.tx &
  2454. ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
  2455. dev_features |= NETIF_F_IP_CSUM;
  2456. if (feat->offload.tx &
  2457. ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
  2458. dev_features |= NETIF_F_IPV6_CSUM;
  2459. if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
  2460. dev_features |= NETIF_F_TSO;
  2461. if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
  2462. dev_features |= NETIF_F_TSO6;
  2463. if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
  2464. dev_features |= NETIF_F_TSO_ECN;
  2465. if (feat->offload.rx_supported &
  2466. ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
  2467. dev_features |= NETIF_F_RXCSUM;
  2468. if (feat->offload.rx_supported &
  2469. ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
  2470. dev_features |= NETIF_F_RXCSUM;
  2471. netdev->features =
  2472. dev_features |
  2473. NETIF_F_SG |
  2474. NETIF_F_RXHASH |
  2475. NETIF_F_HIGHDMA;
  2476. netdev->hw_features |= netdev->features;
  2477. netdev->vlan_features |= netdev->features;
  2478. }
  2479. static void ena_set_conf_feat_params(struct ena_adapter *adapter,
  2480. struct ena_com_dev_get_features_ctx *feat)
  2481. {
  2482. struct net_device *netdev = adapter->netdev;
  2483. /* Copy mac address */
  2484. if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
  2485. eth_hw_addr_random(netdev);
  2486. ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
  2487. } else {
  2488. ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
  2489. ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
  2490. }
  2491. /* Set offload features */
  2492. ena_set_dev_offloads(feat, netdev);
  2493. adapter->max_mtu = feat->dev_attr.max_mtu;
  2494. netdev->max_mtu = adapter->max_mtu;
  2495. netdev->min_mtu = ENA_MIN_MTU;
  2496. }
  2497. static int ena_rss_init_default(struct ena_adapter *adapter)
  2498. {
  2499. struct ena_com_dev *ena_dev = adapter->ena_dev;
  2500. struct device *dev = &adapter->pdev->dev;
  2501. int rc, i;
  2502. u32 val;
  2503. rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
  2504. if (unlikely(rc)) {
  2505. dev_err(dev, "Cannot init indirect table\n");
  2506. goto err_rss_init;
  2507. }
  2508. for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
  2509. val = ethtool_rxfh_indir_default(i, adapter->num_queues);
  2510. rc = ena_com_indirect_table_fill_entry(ena_dev, i,
  2511. ENA_IO_RXQ_IDX(val));
  2512. if (unlikely(rc && (rc != -EOPNOTSUPP))) {
  2513. dev_err(dev, "Cannot fill indirect table\n");
  2514. goto err_fill_indir;
  2515. }
  2516. }
  2517. rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
  2518. ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
  2519. if (unlikely(rc && (rc != -EOPNOTSUPP))) {
  2520. dev_err(dev, "Cannot fill hash function\n");
  2521. goto err_fill_indir;
  2522. }
  2523. rc = ena_com_set_default_hash_ctrl(ena_dev);
  2524. if (unlikely(rc && (rc != -EOPNOTSUPP))) {
  2525. dev_err(dev, "Cannot fill hash control\n");
  2526. goto err_fill_indir;
  2527. }
  2528. return 0;
  2529. err_fill_indir:
  2530. ena_com_rss_destroy(ena_dev);
  2531. err_rss_init:
  2532. return rc;
  2533. }
  2534. static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
  2535. {
  2536. int release_bars;
  2537. if (ena_dev->mem_bar)
  2538. devm_iounmap(&pdev->dev, ena_dev->mem_bar);
  2539. if (ena_dev->reg_bar)
  2540. devm_iounmap(&pdev->dev, ena_dev->reg_bar);
  2541. release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
  2542. pci_release_selected_regions(pdev, release_bars);
  2543. }
  2544. static int ena_calc_queue_size(struct pci_dev *pdev,
  2545. struct ena_com_dev *ena_dev,
  2546. u16 *max_tx_sgl_size,
  2547. u16 *max_rx_sgl_size,
  2548. struct ena_com_dev_get_features_ctx *get_feat_ctx)
  2549. {
  2550. u32 queue_size = ENA_DEFAULT_RING_SIZE;
  2551. queue_size = min_t(u32, queue_size,
  2552. get_feat_ctx->max_queues.max_cq_depth);
  2553. queue_size = min_t(u32, queue_size,
  2554. get_feat_ctx->max_queues.max_sq_depth);
  2555. if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
  2556. queue_size = min_t(u32, queue_size,
  2557. get_feat_ctx->max_queues.max_llq_depth);
  2558. queue_size = rounddown_pow_of_two(queue_size);
  2559. if (unlikely(!queue_size)) {
  2560. dev_err(&pdev->dev, "Invalid queue size\n");
  2561. return -EFAULT;
  2562. }
  2563. *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
  2564. get_feat_ctx->max_queues.max_packet_tx_descs);
  2565. *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
  2566. get_feat_ctx->max_queues.max_packet_rx_descs);
  2567. return queue_size;
  2568. }
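/* Worked example (taking ENA_DEFAULT_RING_SIZE as 1024 for illustration):
 * if the device reports max_cq_depth = max_sq_depth = 600, the queue size
 * is clamped to 600 and then rounded down to the nearest power of two,
 * giving 512 descriptors per ring.
 */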
  2569. /* ena_probe - Device Initialization Routine
  2570. * @pdev: PCI device information struct
  2571. * @ent: entry in ena_pci_tbl
  2572. *
  2573. * Returns 0 on success, negative on failure
  2574. *
  2575. * ena_probe initializes an adapter identified by a pci_dev structure.
  2576. * The OS initialization, configuring of the adapter private structure,
  2577. * and a hardware reset occur.
  2578. */
  2579. static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  2580. {
  2581. struct ena_com_dev_get_features_ctx get_feat_ctx;
  2582. static int version_printed;
  2583. struct net_device *netdev;
  2584. struct ena_adapter *adapter;
  2585. struct ena_com_dev *ena_dev = NULL;
  2586. static int adapters_found;
  2587. int io_queue_num, bars, rc;
  2588. int queue_size;
  2589. u16 tx_sgl_size = 0;
  2590. u16 rx_sgl_size = 0;
  2591. bool wd_state;
  2592. dev_dbg(&pdev->dev, "%s\n", __func__);
  2593. if (version_printed++ == 0)
  2594. dev_info(&pdev->dev, "%s", version);
  2595. rc = pci_enable_device_mem(pdev);
  2596. if (rc) {
  2597. dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
  2598. return rc;
  2599. }
  2600. pci_set_master(pdev);
  2601. ena_dev = vzalloc(sizeof(*ena_dev));
  2602. if (!ena_dev) {
  2603. rc = -ENOMEM;
  2604. goto err_disable_device;
  2605. }
  2606. bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
  2607. rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
  2608. if (rc) {
  2609. dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
  2610. rc);
  2611. goto err_free_ena_dev;
  2612. }
  2613. ena_dev->reg_bar = devm_ioremap(&pdev->dev,
  2614. pci_resource_start(pdev, ENA_REG_BAR),
  2615. pci_resource_len(pdev, ENA_REG_BAR));
  2616. if (!ena_dev->reg_bar) {
  2617. dev_err(&pdev->dev, "failed to remap regs bar\n");
  2618. rc = -EFAULT;
  2619. goto err_free_region;
  2620. }
  2621. ena_dev->dmadev = &pdev->dev;
  2622. rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
  2623. if (rc) {
  2624. dev_err(&pdev->dev, "ena device init failed\n");
  2625. if (rc == -ETIME)
  2626. rc = -EPROBE_DEFER;
  2627. goto err_free_region;
  2628. }
	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
						   pci_resource_start(pdev, ENA_MEM_BAR),
						   pci_resource_len(pdev, ENA_MEM_BAR));
		if (!ena_dev->mem_bar) {
			rc = -EFAULT;
			goto err_device_destroy;
		}
	}
	/* Initial TX interrupt delay. Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity.
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
					 &rx_sgl_size, &get_feat_ctx);
	if ((queue_size <= 0) || (io_queue_num <= 0)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
		 io_queue_num, queue_size);

	/* dev zeroed in init_etherdev */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	adapter->max_tx_sgl_size = tx_sgl_size;
	adapter->max_rx_sgl_size = rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}

	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;
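/*
 * Error unwind: each label below releases only what was successfully
 * acquired before the corresponding failure point, in reverse order of
 * acquisition.
 */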
err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	ena_com_destroy_interrupt_moderation(ena_dev);
	del_timer(&adapter->timer_service);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	unregister_netdev(netdev);
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	/* Reset the device only if the device is running. */
	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		ena_com_dev_reset(ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	free_netdev(netdev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	ena_com_destroy_interrupt_moderation(ena_dev);

	vfree(ena_dev);
}
#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
 * @pdev: PCI device information struct
 * @state: power state
 */
static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter);
	rtnl_unlock();

	return 0;
}
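/*
 * Note: the suspend path above tears the device down under the rtnl lock;
 * ena_resume() below is expected to rebuild it via ena_restore_device().
 */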
/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();

	return rc;
}
#endif
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
#ifdef CONFIG_PM
	.suspend	= ena_suspend,
	.resume		= ena_resume,
#endif
	.sriov_configure = pci_sriov_configure_simple,
};
static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}
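/*
 * Note: ena_wq is created as a single-threaded workqueue, so any deferred
 * work the driver queues on it (e.g. adapter->reset_task, which runs
 * ena_fw_reset_device) is expected to execute serialized rather than
 * concurrently.
 */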
static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}
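/*
 * Note: if a link-up event arrives while ENA_FLAG_ONGOING_RESET is set,
 * only ENA_FLAG_LINK_UP is recorded above; the restore path is expected to
 * turn the carrier back on once the reset completes.
 */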
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.rx_drops = rx_drops;
	u64_stats_update_end(&adapter->syncp);
}
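/*
 * Example of the 64-bit reassembly above (illustrative values):
 * rx_drops_high = 0x1 and rx_drops_low = 0x00000002 combine to
 * rx_drops = 0x100000002, i.e. 4294967298 dropped frames.
 */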
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}
/* This handler will be called for an unknown event group or an
 * unimplemented handler.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
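/*
 * Note: any AENQ group without an entry in .handlers above falls back to
 * unimplemented_aenq_handler(). This table is presumably passed to the
 * ena_com layer when the admin queue is set up earlier in this file.
 */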
module_init(ena_init);
module_exit(ena_cleanup);