i40evf_main.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include "i40evf.h"
  4. #include "i40e_prototype.h"
  5. #include "i40evf_client.h"
  6. /* All i40evf tracepoints are defined by the include below, which must
  7. * be included exactly once across the whole kernel with
  8. * CREATE_TRACE_POINTS defined
  9. */
  10. #define CREATE_TRACE_POINTS
  11. #include "i40e_trace.h"
  12. static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
  13. static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
  14. static int i40evf_close(struct net_device *netdev);
  15. char i40evf_driver_name[] = "i40evf";
  16. static const char i40evf_driver_string[] =
  17. "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
  18. #define DRV_KERN "-k"
  19. #define DRV_VERSION_MAJOR 3
  20. #define DRV_VERSION_MINOR 2
  21. #define DRV_VERSION_BUILD 2
  22. #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
  23. __stringify(DRV_VERSION_MINOR) "." \
  24. __stringify(DRV_VERSION_BUILD) \
  25. DRV_KERN
  26. const char i40evf_driver_version[] = DRV_VERSION;
  27. static const char i40evf_copyright[] =
  28. "Copyright (c) 2013 - 2015 Intel Corporation.";
  29. /* i40evf_pci_tbl - PCI Device ID Table
  30. *
  31. * Wildcard entries (PCI_ANY_ID) should come last
  32. * Last entry must be all 0s
  33. *
  34. * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  35. * Class, Class Mask, private data (not used) }
  36. */
  37. static const struct pci_device_id i40evf_pci_tbl[] = {
  38. {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
  39. {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
  40. {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
  41. {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
  42. /* required last entry */
  43. {0, }
  44. };
  45. MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
  46. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  47. MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
  48. MODULE_LICENSE("GPL");
  49. MODULE_VERSION(DRV_VERSION);
  50. static struct workqueue_struct *i40evf_wq;
  51. /**
  52. * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
  53. * @hw: pointer to the HW structure
  54. * @mem: ptr to mem struct to fill out
  55. * @size: size of memory requested
  56. * @alignment: what to align the allocation to
  57. **/
  58. i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
  59. struct i40e_dma_mem *mem,
  60. u64 size, u32 alignment)
  61. {
  62. struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
  63. if (!mem)
  64. return I40E_ERR_PARAM;
  65. mem->size = ALIGN(size, alignment);
  66. mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
  67. (dma_addr_t *)&mem->pa, GFP_KERNEL);
  68. if (mem->va)
  69. return 0;
  70. else
  71. return I40E_ERR_NO_MEMORY;
  72. }
  73. /**
  74. * i40evf_free_dma_mem_d - OS specific memory free for shared code
  75. * @hw: pointer to the HW structure
  76. * @mem: ptr to mem struct to free
  77. **/
  78. i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
  79. {
  80. struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
  81. if (!mem || !mem->va)
  82. return I40E_ERR_PARAM;
  83. dma_free_coherent(&adapter->pdev->dev, mem->size,
  84. mem->va, (dma_addr_t)mem->pa);
  85. return 0;
  86. }
  87. /**
  88. * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
  89. * @hw: pointer to the HW structure
  90. * @mem: ptr to mem struct to fill out
  91. * @size: size of memory requested
  92. **/
  93. i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
  94. struct i40e_virt_mem *mem, u32 size)
  95. {
  96. if (!mem)
  97. return I40E_ERR_PARAM;
  98. mem->size = size;
  99. mem->va = kzalloc(size, GFP_KERNEL);
  100. if (mem->va)
  101. return 0;
  102. else
  103. return I40E_ERR_NO_MEMORY;
  104. }
  105. /**
  106. * i40evf_free_virt_mem_d - OS specific memory free for shared code
  107. * @hw: pointer to the HW structure
  108. * @mem: ptr to mem struct to free
  109. **/
  110. i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
  111. struct i40e_virt_mem *mem)
  112. {
  113. if (!mem)
  114. return I40E_ERR_PARAM;
  115. /* it's ok to kfree a NULL pointer */
  116. kfree(mem->va);
  117. return 0;
  118. }
  119. /**
  120. * i40evf_debug_d - OS dependent version of debug printing
  121. * @hw: pointer to the HW structure
  122. * @mask: debug level mask
  123. * @fmt_str: printf-type format description
  124. **/
  125. void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
  126. {
  127. char buf[512];
  128. va_list argptr;
  129. if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
  130. return;
  131. va_start(argptr, fmt_str);
  132. vsnprintf(buf, sizeof(buf), fmt_str, argptr);
  133. va_end(argptr);
  134. /* the debug string is already formatted with a newline */
  135. pr_info("%s", buf);
  136. }
  137. /**
  138. * i40evf_schedule_reset - Set the flags and schedule a reset event
  139. * @adapter: board private structure
  140. **/
  141. void i40evf_schedule_reset(struct i40evf_adapter *adapter)
  142. {
  143. if (!(adapter->flags &
  144. (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
  145. adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
  146. schedule_work(&adapter->reset_task);
  147. }
  148. }
  149. /**
  150. * i40evf_tx_timeout - Respond to a Tx Hang
  151. * @netdev: network interface device structure
  152. **/
  153. static void i40evf_tx_timeout(struct net_device *netdev)
  154. {
  155. struct i40evf_adapter *adapter = netdev_priv(netdev);
  156. adapter->tx_timeout_count++;
  157. i40evf_schedule_reset(adapter);
  158. }
  159. /**
  160. * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
  161. * @adapter: board private structure
  162. **/
  163. static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
  164. {
  165. struct i40e_hw *hw = &adapter->hw;
  166. if (!adapter->msix_entries)
  167. return;
  168. wr32(hw, I40E_VFINT_DYN_CTL01, 0);
  169. /* read flush */
  170. rd32(hw, I40E_VFGEN_RSTAT);
  171. synchronize_irq(adapter->msix_entries[0].vector);
  172. }
  173. /**
  174. * i40evf_misc_irq_enable - Enable default interrupt generation settings
  175. * @adapter: board private structure
  176. **/
  177. static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
  178. {
  179. struct i40e_hw *hw = &adapter->hw;
  180. wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
  181. I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
  182. wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
  183. /* read flush */
  184. rd32(hw, I40E_VFGEN_RSTAT);
  185. }
  186. /**
  187. * i40evf_irq_disable - Mask off interrupt generation on the NIC
  188. * @adapter: board private structure
  189. **/
  190. static void i40evf_irq_disable(struct i40evf_adapter *adapter)
  191. {
  192. int i;
  193. struct i40e_hw *hw = &adapter->hw;
  194. if (!adapter->msix_entries)
  195. return;
  196. for (i = 1; i < adapter->num_msix_vectors; i++) {
  197. wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
  198. synchronize_irq(adapter->msix_entries[i].vector);
  199. }
  200. /* read flush */
  201. rd32(hw, I40E_VFGEN_RSTAT);
  202. }
  203. /**
  204. * i40evf_irq_enable_queues - Enable interrupt for specified queues
  205. * @adapter: board private structure
  206. * @mask: bitmap of queues to enable
  207. **/
  208. void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
  209. {
  210. struct i40e_hw *hw = &adapter->hw;
  211. int i;
  212. for (i = 1; i < adapter->num_msix_vectors; i++) {
  213. if (mask & BIT(i - 1)) {
  214. wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
  215. I40E_VFINT_DYN_CTLN1_INTENA_MASK |
  216. I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
  217. }
  218. }
  219. }
  220. /**
  221. * i40evf_irq_enable - Enable default interrupt generation settings
  222. * @adapter: board private structure
  223. * @flush: boolean value whether to run rd32()
  224. **/
  225. void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
  226. {
  227. struct i40e_hw *hw = &adapter->hw;
  228. i40evf_misc_irq_enable(adapter);
  229. i40evf_irq_enable_queues(adapter, ~0);
  230. if (flush)
  231. rd32(hw, I40E_VFGEN_RSTAT);
  232. }
  233. /**
  234. * i40evf_msix_aq - Interrupt handler for vector 0
  235. * @irq: interrupt number
  236. * @data: pointer to netdev
  237. **/
  238. static irqreturn_t i40evf_msix_aq(int irq, void *data)
  239. {
  240. struct net_device *netdev = data;
  241. struct i40evf_adapter *adapter = netdev_priv(netdev);
  242. struct i40e_hw *hw = &adapter->hw;
  243. /* handle non-queue interrupts, these reads clear the registers */
  244. rd32(hw, I40E_VFINT_ICR01);
  245. rd32(hw, I40E_VFINT_ICR0_ENA1);
  246. /* schedule work on the private workqueue */
  247. schedule_work(&adapter->adminq_task);
  248. return IRQ_HANDLED;
  249. }
  250. /**
  251. * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
  252. * @irq: interrupt number
  253. * @data: pointer to a q_vector
  254. **/
  255. static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
  256. {
  257. struct i40e_q_vector *q_vector = data;
  258. if (!q_vector->tx.ring && !q_vector->rx.ring)
  259. return IRQ_HANDLED;
  260. napi_schedule_irqoff(&q_vector->napi);
  261. return IRQ_HANDLED;
  262. }
  263. /**
  264. * i40evf_map_vector_to_rxq - associate irqs with rx queues
  265. * @adapter: board private structure
  266. * @v_idx: interrupt number
  267. * @r_idx: queue number
  268. **/
  269. static void
  270. i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
  271. {
  272. struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
  273. struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
  274. struct i40e_hw *hw = &adapter->hw;
  275. rx_ring->q_vector = q_vector;
  276. rx_ring->next = q_vector->rx.ring;
  277. rx_ring->vsi = &adapter->vsi;
  278. q_vector->rx.ring = rx_ring;
  279. q_vector->rx.count++;
  280. q_vector->rx.next_update = jiffies + 1;
  281. q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
  282. q_vector->ring_mask |= BIT(r_idx);
  283. wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
  284. q_vector->rx.current_itr);
  285. q_vector->rx.current_itr = q_vector->rx.target_itr;
  286. }
  287. /**
  288. * i40evf_map_vector_to_txq - associate irqs with tx queues
  289. * @adapter: board private structure
  290. * @v_idx: interrupt number
  291. * @t_idx: queue number
  292. **/
  293. static void
  294. i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
  295. {
  296. struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
  297. struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
  298. struct i40e_hw *hw = &adapter->hw;
  299. tx_ring->q_vector = q_vector;
  300. tx_ring->next = q_vector->tx.ring;
  301. tx_ring->vsi = &adapter->vsi;
  302. q_vector->tx.ring = tx_ring;
  303. q_vector->tx.count++;
  304. q_vector->tx.next_update = jiffies + 1;
  305. q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
  306. q_vector->num_ringpairs++;
  307. wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
  308. q_vector->tx.target_itr);
  309. q_vector->tx.current_itr = q_vector->tx.target_itr;
  310. }
  311. /**
  312. * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
  313. * @adapter: board private structure to initialize
  314. *
  315. * This function maps descriptor rings to the queue-specific vectors
  316. * we were allotted through the MSI-X enabling code. Ideally, we'd have
  317. * one vector per ring/queue, but on a constrained vector budget, we
  318. * group the rings as "efficiently" as possible. You would add new
  319. * mapping configurations in here.
  320. **/
  321. static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
  322. {
  323. int rings_remaining = adapter->num_active_queues;
  324. int ridx = 0, vidx = 0;
  325. int q_vectors;
  326. q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  327. for (; ridx < rings_remaining; ridx++) {
  328. i40evf_map_vector_to_rxq(adapter, vidx, ridx);
  329. i40evf_map_vector_to_txq(adapter, vidx, ridx);
  330. /* In the case where we have more queues than vectors, continue
  331. * round-robin on vectors until all queues are mapped.
  332. */
  333. if (++vidx >= q_vectors)
  334. vidx = 0;
  335. }
  336. adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
  337. }
  338. /**
  339. * i40evf_irq_affinity_notify - Callback for affinity changes
  340. * @notify: context as to what irq was changed
  341. * @mask: the new affinity mask
  342. *
  343. * This is a callback function used by the irq_set_affinity_notifier function
  344. * so that we may register to receive changes to the irq affinity masks.
  345. **/
  346. static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
  347. const cpumask_t *mask)
  348. {
  349. struct i40e_q_vector *q_vector =
  350. container_of(notify, struct i40e_q_vector, affinity_notify);
  351. cpumask_copy(&q_vector->affinity_mask, mask);
  352. }
  353. /**
  354. * i40evf_irq_affinity_release - Callback for affinity notifier release
  355. * @ref: internal core kernel usage
  356. *
  357. * This is a callback function used by the irq_set_affinity_notifier function
  358. * to inform the current notification subscriber that they will no longer
  359. * receive notifications.
  360. **/
  361. static void i40evf_irq_affinity_release(struct kref *ref) {}
  362. /**
  363. * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
  364. * @adapter: board private structure
  365. * @basename: device basename
  366. *
  367. * Allocates MSI-X vectors for tx and rx handling, and requests
  368. * interrupts from the kernel.
  369. **/
  370. static int
  371. i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
  372. {
  373. unsigned int vector, q_vectors;
  374. unsigned int rx_int_idx = 0, tx_int_idx = 0;
  375. int irq_num, err;
  376. int cpu;
  377. i40evf_irq_disable(adapter);
  378. /* Decrement for Other and TCP Timer vectors */
  379. q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  380. for (vector = 0; vector < q_vectors; vector++) {
  381. struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
  382. irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
  383. if (q_vector->tx.ring && q_vector->rx.ring) {
  384. snprintf(q_vector->name, sizeof(q_vector->name),
  385. "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
  386. tx_int_idx++;
  387. } else if (q_vector->rx.ring) {
  388. snprintf(q_vector->name, sizeof(q_vector->name),
  389. "i40evf-%s-rx-%d", basename, rx_int_idx++);
  390. } else if (q_vector->tx.ring) {
  391. snprintf(q_vector->name, sizeof(q_vector->name),
  392. "i40evf-%s-tx-%d", basename, tx_int_idx++);
  393. } else {
  394. /* skip this unused q_vector */
  395. continue;
  396. }
  397. err = request_irq(irq_num,
  398. i40evf_msix_clean_rings,
  399. 0,
  400. q_vector->name,
  401. q_vector);
  402. if (err) {
  403. dev_info(&adapter->pdev->dev,
  404. "Request_irq failed, error: %d\n", err);
  405. goto free_queue_irqs;
  406. }
  407. /* register for affinity change notifications */
  408. q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
  409. q_vector->affinity_notify.release =
  410. i40evf_irq_affinity_release;
  411. irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
  412. /* Spread the IRQ affinity hints across online CPUs. Note that
  413. * get_cpu_mask returns a mask with a permanent lifetime so
  414. * it's safe to use as a hint for irq_set_affinity_hint.
  415. */
  416. cpu = cpumask_local_spread(q_vector->v_idx, -1);
  417. irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
  418. }
  419. return 0;
  420. free_queue_irqs:
  421. while (vector) {
  422. vector--;
  423. irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
  424. irq_set_affinity_notifier(irq_num, NULL);
  425. irq_set_affinity_hint(irq_num, NULL);
  426. free_irq(irq_num, &adapter->q_vectors[vector]);
  427. }
  428. return err;
  429. }
  430. /**
  431. * i40evf_request_misc_irq - Initialize MSI-X interrupts
  432. * @adapter: board private structure
  433. *
  434. * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
  435. * vector is only for the admin queue, and stays active even when the netdev
  436. * is closed.
  437. **/
  438. static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
  439. {
  440. struct net_device *netdev = adapter->netdev;
  441. int err;
  442. snprintf(adapter->misc_vector_name,
  443. sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
  444. dev_name(&adapter->pdev->dev));
  445. err = request_irq(adapter->msix_entries[0].vector,
  446. &i40evf_msix_aq, 0,
  447. adapter->misc_vector_name, netdev);
  448. if (err) {
  449. dev_err(&adapter->pdev->dev,
  450. "request_irq for %s failed: %d\n",
  451. adapter->misc_vector_name, err);
  452. free_irq(adapter->msix_entries[0].vector, netdev);
  453. }
  454. return err;
  455. }
  456. /**
  457. * i40evf_free_traffic_irqs - Free MSI-X interrupts
  458. * @adapter: board private structure
  459. *
  460. * Frees all MSI-X vectors other than 0.
  461. **/
  462. static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
  463. {
  464. int vector, irq_num, q_vectors;
  465. if (!adapter->msix_entries)
  466. return;
  467. q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  468. for (vector = 0; vector < q_vectors; vector++) {
  469. irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
  470. irq_set_affinity_notifier(irq_num, NULL);
  471. irq_set_affinity_hint(irq_num, NULL);
  472. free_irq(irq_num, &adapter->q_vectors[vector]);
  473. }
  474. }
  475. /**
  476. * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
  477. * @adapter: board private structure
  478. *
  479. * Frees MSI-X vector 0.
  480. **/
  481. static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
  482. {
  483. struct net_device *netdev = adapter->netdev;
  484. if (!adapter->msix_entries)
  485. return;
  486. free_irq(adapter->msix_entries[0].vector, netdev);
  487. }
  488. /**
  489. * i40evf_configure_tx - Configure Transmit Unit after Reset
  490. * @adapter: board private structure
  491. *
  492. * Configure the Tx unit of the MAC after a reset.
  493. **/
  494. static void i40evf_configure_tx(struct i40evf_adapter *adapter)
  495. {
  496. struct i40e_hw *hw = &adapter->hw;
  497. int i;
  498. for (i = 0; i < adapter->num_active_queues; i++)
  499. adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
  500. }
  501. /**
  502. * i40evf_configure_rx - Configure Receive Unit after Reset
  503. * @adapter: board private structure
  504. *
  505. * Configure the Rx unit of the MAC after a reset.
  506. **/
  507. static void i40evf_configure_rx(struct i40evf_adapter *adapter)
  508. {
  509. unsigned int rx_buf_len = I40E_RXBUFFER_2048;
  510. struct i40e_hw *hw = &adapter->hw;
  511. int i;
  512. /* Legacy Rx will always default to a 2048 buffer size. */
  513. #if (PAGE_SIZE < 8192)
  514. if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
  515. struct net_device *netdev = adapter->netdev;
  516. /* For jumbo frames on systems with 4K pages we have to use
  517. * an order 1 page, so we might as well increase the size
  518. * of our Rx buffer to make better use of the available space
  519. */
  520. rx_buf_len = I40E_RXBUFFER_3072;
  521. /* We use a 1536 buffer size for configurations with
  522. * standard Ethernet mtu. On x86 this gives us enough room
  523. * for shared info and 192 bytes of padding.
  524. */
  525. if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
  526. (netdev->mtu <= ETH_DATA_LEN))
  527. rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
  528. }
  529. #endif
  530. for (i = 0; i < adapter->num_active_queues; i++) {
  531. adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
  532. adapter->rx_rings[i].rx_buf_len = rx_buf_len;
  533. if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
  534. clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
  535. else
  536. set_ring_build_skb_enabled(&adapter->rx_rings[i]);
  537. }
  538. }
  539. /**
  540. * i40evf_find_vlan - Search filter list for specific vlan filter
  541. * @adapter: board private structure
  542. * @vlan: vlan tag
  543. *
  544. * Returns ptr to the filter object or NULL. Must be called while holding the
  545. * mac_vlan_list_lock.
  546. **/
  547. static struct
  548. i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
  549. {
  550. struct i40evf_vlan_filter *f;
  551. list_for_each_entry(f, &adapter->vlan_filter_list, list) {
  552. if (vlan == f->vlan)
  553. return f;
  554. }
  555. return NULL;
  556. }
  557. /**
  558. * i40evf_add_vlan - Add a vlan filter to the list
  559. * @adapter: board private structure
  560. * @vlan: VLAN tag
  561. *
  562. * Returns ptr to the filter object or NULL when no memory available.
  563. **/
  564. static struct
  565. i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
  566. {
  567. struct i40evf_vlan_filter *f = NULL;
  568. spin_lock_bh(&adapter->mac_vlan_list_lock);
  569. f = i40evf_find_vlan(adapter, vlan);
  570. if (!f) {
  571. f = kzalloc(sizeof(*f), GFP_KERNEL);
  572. if (!f)
  573. goto clearout;
  574. f->vlan = vlan;
  575. INIT_LIST_HEAD(&f->list);
  576. list_add(&f->list, &adapter->vlan_filter_list);
  577. f->add = true;
  578. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
  579. }
  580. clearout:
  581. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  582. return f;
  583. }
  584. /**
  585. * i40evf_del_vlan - Remove a vlan filter from the list
  586. * @adapter: board private structure
  587. * @vlan: VLAN tag
  588. **/
  589. static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
  590. {
  591. struct i40evf_vlan_filter *f;
  592. spin_lock_bh(&adapter->mac_vlan_list_lock);
  593. f = i40evf_find_vlan(adapter, vlan);
  594. if (f) {
  595. f->remove = true;
  596. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
  597. }
  598. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  599. }
  600. /**
  601. * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
  602. * @netdev: network device struct
  603. * @proto: unused protocol data
  604. * @vid: VLAN tag
  605. **/
  606. static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
  607. __always_unused __be16 proto, u16 vid)
  608. {
  609. struct i40evf_adapter *adapter = netdev_priv(netdev);
  610. if (!VLAN_ALLOWED(adapter))
  611. return -EIO;
  612. if (i40evf_add_vlan(adapter, vid) == NULL)
  613. return -ENOMEM;
  614. return 0;
  615. }
  616. /**
  617. * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
  618. * @netdev: network device struct
  619. * @proto: unused protocol data
  620. * @vid: VLAN tag
  621. **/
  622. static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
  623. __always_unused __be16 proto, u16 vid)
  624. {
  625. struct i40evf_adapter *adapter = netdev_priv(netdev);
  626. if (VLAN_ALLOWED(adapter)) {
  627. i40evf_del_vlan(adapter, vid);
  628. return 0;
  629. }
  630. return -EIO;
  631. }
  632. /**
  633. * i40evf_find_filter - Search filter list for specific mac filter
  634. * @adapter: board private structure
  635. * @macaddr: the MAC address
  636. *
  637. * Returns ptr to the filter object or NULL. Must be called while holding the
  638. * mac_vlan_list_lock.
  639. **/
  640. static struct
  641. i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
  642. const u8 *macaddr)
  643. {
  644. struct i40evf_mac_filter *f;
  645. if (!macaddr)
  646. return NULL;
  647. list_for_each_entry(f, &adapter->mac_filter_list, list) {
  648. if (ether_addr_equal(macaddr, f->macaddr))
  649. return f;
  650. }
  651. return NULL;
  652. }
  653. /**
  654. * i40e_add_filter - Add a mac filter to the filter list
  655. * @adapter: board private structure
  656. * @macaddr: the MAC address
  657. *
  658. * Returns ptr to the filter object or NULL when no memory available.
  659. **/
  660. static struct
  661. i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
  662. const u8 *macaddr)
  663. {
  664. struct i40evf_mac_filter *f;
  665. if (!macaddr)
  666. return NULL;
  667. f = i40evf_find_filter(adapter, macaddr);
  668. if (!f) {
  669. f = kzalloc(sizeof(*f), GFP_ATOMIC);
  670. if (!f)
  671. return f;
  672. ether_addr_copy(f->macaddr, macaddr);
  673. list_add_tail(&f->list, &adapter->mac_filter_list);
  674. f->add = true;
  675. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
  676. } else {
  677. f->remove = false;
  678. }
  679. return f;
  680. }
  681. /**
  682. * i40evf_set_mac - NDO callback to set port mac address
  683. * @netdev: network interface device structure
  684. * @p: pointer to an address structure
  685. *
  686. * Returns 0 on success, negative on failure
  687. **/
  688. static int i40evf_set_mac(struct net_device *netdev, void *p)
  689. {
  690. struct i40evf_adapter *adapter = netdev_priv(netdev);
  691. struct i40e_hw *hw = &adapter->hw;
  692. struct i40evf_mac_filter *f;
  693. struct sockaddr *addr = p;
  694. if (!is_valid_ether_addr(addr->sa_data))
  695. return -EADDRNOTAVAIL;
  696. if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
  697. return 0;
  698. if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
  699. return -EPERM;
  700. spin_lock_bh(&adapter->mac_vlan_list_lock);
  701. f = i40evf_find_filter(adapter, hw->mac.addr);
  702. if (f) {
  703. f->remove = true;
  704. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
  705. }
  706. f = i40evf_add_filter(adapter, addr->sa_data);
  707. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  708. if (f) {
  709. ether_addr_copy(hw->mac.addr, addr->sa_data);
  710. ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
  711. }
  712. return (f == NULL) ? -ENOMEM : 0;
  713. }
  714. /**
  715. * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
  716. * @netdev: the netdevice
  717. * @addr: address to add
  718. *
  719. * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
  720. * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
  721. */
  722. static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
  723. {
  724. struct i40evf_adapter *adapter = netdev_priv(netdev);
  725. if (i40evf_add_filter(adapter, addr))
  726. return 0;
  727. else
  728. return -ENOMEM;
  729. }
  730. /**
  731. * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
  732. * @netdev: the netdevice
  733. * @addr: address to add
  734. *
  735. * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
  736. * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
  737. */
  738. static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
  739. {
  740. struct i40evf_adapter *adapter = netdev_priv(netdev);
  741. struct i40evf_mac_filter *f;
  742. /* Under some circumstances, we might receive a request to delete
  743. * our own device address from our uc list. Because we store the
  744. * device address in the VSI's MAC/VLAN filter list, we need to ignore
  745. * such requests and not delete our device address from this list.
  746. */
  747. if (ether_addr_equal(addr, netdev->dev_addr))
  748. return 0;
  749. f = i40evf_find_filter(adapter, addr);
  750. if (f) {
  751. f->remove = true;
  752. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
  753. }
  754. return 0;
  755. }
  756. /**
  757. * i40evf_set_rx_mode - NDO callback to set the netdev filters
  758. * @netdev: network interface device structure
  759. **/
  760. static void i40evf_set_rx_mode(struct net_device *netdev)
  761. {
  762. struct i40evf_adapter *adapter = netdev_priv(netdev);
  763. spin_lock_bh(&adapter->mac_vlan_list_lock);
  764. __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
  765. __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
  766. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  767. if (netdev->flags & IFF_PROMISC &&
  768. !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
  769. adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
  770. else if (!(netdev->flags & IFF_PROMISC) &&
  771. adapter->flags & I40EVF_FLAG_PROMISC_ON)
  772. adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
  773. if (netdev->flags & IFF_ALLMULTI &&
  774. !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
  775. adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
  776. else if (!(netdev->flags & IFF_ALLMULTI) &&
  777. adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
  778. adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
  779. }
  780. /**
  781. * i40evf_napi_enable_all - enable NAPI on all queue vectors
  782. * @adapter: board private structure
  783. **/
  784. static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
  785. {
  786. int q_idx;
  787. struct i40e_q_vector *q_vector;
  788. int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  789. for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  790. struct napi_struct *napi;
  791. q_vector = &adapter->q_vectors[q_idx];
  792. napi = &q_vector->napi;
  793. napi_enable(napi);
  794. }
  795. }
  796. /**
  797. * i40evf_napi_disable_all - disable NAPI on all queue vectors
  798. * @adapter: board private structure
  799. **/
  800. static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
  801. {
  802. int q_idx;
  803. struct i40e_q_vector *q_vector;
  804. int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  805. for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  806. q_vector = &adapter->q_vectors[q_idx];
  807. napi_disable(&q_vector->napi);
  808. }
  809. }
  810. /**
  811. * i40evf_configure - set up transmit and receive data structures
  812. * @adapter: board private structure
  813. **/
  814. static void i40evf_configure(struct i40evf_adapter *adapter)
  815. {
  816. struct net_device *netdev = adapter->netdev;
  817. int i;
  818. i40evf_set_rx_mode(netdev);
  819. i40evf_configure_tx(adapter);
  820. i40evf_configure_rx(adapter);
  821. adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
  822. for (i = 0; i < adapter->num_active_queues; i++) {
  823. struct i40e_ring *ring = &adapter->rx_rings[i];
  824. i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
  825. }
  826. }
  827. /**
  828. * i40evf_up_complete - Finish the last steps of bringing up a connection
  829. * @adapter: board private structure
  830. *
  831. * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  832. **/
  833. static void i40evf_up_complete(struct i40evf_adapter *adapter)
  834. {
  835. adapter->state = __I40EVF_RUNNING;
  836. clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
  837. i40evf_napi_enable_all(adapter);
  838. adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
  839. if (CLIENT_ENABLED(adapter))
  840. adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
  841. mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
  842. }
  843. /**
  844. * i40e_down - Shutdown the connection processing
  845. * @adapter: board private structure
  846. *
  847. * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  848. **/
  849. void i40evf_down(struct i40evf_adapter *adapter)
  850. {
  851. struct net_device *netdev = adapter->netdev;
  852. struct i40evf_vlan_filter *vlf;
  853. struct i40evf_mac_filter *f;
  854. struct i40evf_cloud_filter *cf;
  855. if (adapter->state <= __I40EVF_DOWN_PENDING)
  856. return;
  857. netif_carrier_off(netdev);
  858. netif_tx_disable(netdev);
  859. adapter->link_up = false;
  860. i40evf_napi_disable_all(adapter);
  861. i40evf_irq_disable(adapter);
  862. spin_lock_bh(&adapter->mac_vlan_list_lock);
  863. /* clear the sync flag on all filters */
  864. __dev_uc_unsync(adapter->netdev, NULL);
  865. __dev_mc_unsync(adapter->netdev, NULL);
  866. /* remove all MAC filters */
  867. list_for_each_entry(f, &adapter->mac_filter_list, list) {
  868. f->remove = true;
  869. }
  870. /* remove all VLAN filters */
  871. list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
  872. vlf->remove = true;
  873. }
  874. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  875. /* remove all cloud filters */
  876. spin_lock_bh(&adapter->cloud_filter_list_lock);
  877. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  878. cf->del = true;
  879. }
  880. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  881. if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
  882. adapter->state != __I40EVF_RESETTING) {
  883. /* cancel any current operation */
  884. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  885. /* Schedule operations to close down the HW. Don't wait
  886. * here for this to complete. The watchdog is still running
  887. * and it will take care of this.
  888. */
  889. adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
  890. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
  891. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
  892. adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
  893. }
  894. mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
  895. }
  896. /**
  897. * i40evf_acquire_msix_vectors - Setup the MSIX capability
  898. * @adapter: board private structure
  899. * @vectors: number of vectors to request
  900. *
  901. * Work with the OS to set up the MSIX vectors needed.
  902. *
  903. * Returns 0 on success, negative on failure
  904. **/
  905. static int
  906. i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
  907. {
  908. int err, vector_threshold;
  909. /* We'll want at least 3 (vector_threshold):
  910. * 0) Other (Admin Queue and link, mostly)
  911. * 1) TxQ[0] Cleanup
  912. * 2) RxQ[0] Cleanup
  913. */
  914. vector_threshold = MIN_MSIX_COUNT;
  915. /* The more we get, the more we will assign to Tx/Rx Cleanup
  916. * for the separate queues...where Rx Cleanup >= Tx Cleanup.
  917. * Right now, we simply care about how many we'll get; we'll
  918. * set them up later while requesting irq's.
  919. */
  920. err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
  921. vector_threshold, vectors);
  922. if (err < 0) {
  923. dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
  924. kfree(adapter->msix_entries);
  925. adapter->msix_entries = NULL;
  926. return err;
  927. }
  928. /* Adjust for only the vectors we'll use, which is minimum
  929. * of max_msix_q_vectors + NONQ_VECS, or the number of
  930. * vectors we were allocated.
  931. */
  932. adapter->num_msix_vectors = err;
  933. return 0;
  934. }
  935. /**
  936. * i40evf_free_queues - Free memory for all rings
  937. * @adapter: board private structure to initialize
  938. *
  939. * Free all of the memory associated with queue pairs.
  940. **/
  941. static void i40evf_free_queues(struct i40evf_adapter *adapter)
  942. {
  943. if (!adapter->vsi_res)
  944. return;
  945. adapter->num_active_queues = 0;
  946. kfree(adapter->tx_rings);
  947. adapter->tx_rings = NULL;
  948. kfree(adapter->rx_rings);
  949. adapter->rx_rings = NULL;
  950. }
  951. /**
  952. * i40evf_alloc_queues - Allocate memory for all rings
  953. * @adapter: board private structure to initialize
  954. *
  955. * We allocate one ring per queue at run-time since we don't know the
  956. * number of queues at compile-time. The polling_netdev array is
  957. * intended for Multiqueue, but should work fine with a single queue.
  958. **/
  959. static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
  960. {
  961. int i, num_active_queues;
  962. /* If we're in reset reallocating queues we don't actually know yet for
  963. * certain the PF gave us the number of queues we asked for but we'll
  964. * assume it did. Once basic reset is finished we'll confirm once we
  965. * start negotiating config with PF.
  966. */
  967. if (adapter->num_req_queues)
  968. num_active_queues = adapter->num_req_queues;
  969. else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
  970. adapter->num_tc)
  971. num_active_queues = adapter->ch_config.total_qps;
  972. else
  973. num_active_queues = min_t(int,
  974. adapter->vsi_res->num_queue_pairs,
  975. (int)(num_online_cpus()));
  976. adapter->tx_rings = kcalloc(num_active_queues,
  977. sizeof(struct i40e_ring), GFP_KERNEL);
  978. if (!adapter->tx_rings)
  979. goto err_out;
  980. adapter->rx_rings = kcalloc(num_active_queues,
  981. sizeof(struct i40e_ring), GFP_KERNEL);
  982. if (!adapter->rx_rings)
  983. goto err_out;
  984. for (i = 0; i < num_active_queues; i++) {
  985. struct i40e_ring *tx_ring;
  986. struct i40e_ring *rx_ring;
  987. tx_ring = &adapter->tx_rings[i];
  988. tx_ring->queue_index = i;
  989. tx_ring->netdev = adapter->netdev;
  990. tx_ring->dev = &adapter->pdev->dev;
  991. tx_ring->count = adapter->tx_desc_count;
  992. tx_ring->itr_setting = I40E_ITR_TX_DEF;
  993. if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
  994. tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
  995. rx_ring = &adapter->rx_rings[i];
  996. rx_ring->queue_index = i;
  997. rx_ring->netdev = adapter->netdev;
  998. rx_ring->dev = &adapter->pdev->dev;
  999. rx_ring->count = adapter->rx_desc_count;
  1000. rx_ring->itr_setting = I40E_ITR_RX_DEF;
  1001. }
  1002. adapter->num_active_queues = num_active_queues;
  1003. return 0;
  1004. err_out:
  1005. i40evf_free_queues(adapter);
  1006. return -ENOMEM;
  1007. }
  1008. /**
  1009. * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
  1010. * @adapter: board private structure to initialize
  1011. *
  1012. * Attempt to configure the interrupts using the best available
  1013. * capabilities of the hardware and the kernel.
  1014. **/
  1015. static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
  1016. {
  1017. int vector, v_budget;
  1018. int pairs = 0;
  1019. int err = 0;
  1020. if (!adapter->vsi_res) {
  1021. err = -EIO;
  1022. goto out;
  1023. }
  1024. pairs = adapter->num_active_queues;
  1025. /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
  1026. * us much good if we have more vectors than CPUs. However, we already
  1027. * limit the total number of queues by the number of CPUs so we do not
  1028. * need any further limiting here.
  1029. */
  1030. v_budget = min_t(int, pairs + NONQ_VECS,
  1031. (int)adapter->vf_res->max_vectors);
  1032. adapter->msix_entries = kcalloc(v_budget,
  1033. sizeof(struct msix_entry), GFP_KERNEL);
  1034. if (!adapter->msix_entries) {
  1035. err = -ENOMEM;
  1036. goto out;
  1037. }
  1038. for (vector = 0; vector < v_budget; vector++)
  1039. adapter->msix_entries[vector].entry = vector;
  1040. err = i40evf_acquire_msix_vectors(adapter, v_budget);
  1041. out:
  1042. netif_set_real_num_rx_queues(adapter->netdev, pairs);
  1043. netif_set_real_num_tx_queues(adapter->netdev, pairs);
  1044. return err;
  1045. }
  1046. /**
  1047. * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
  1048. * @adapter: board private structure
  1049. *
  1050. * Return 0 on success, negative on failure
  1051. **/
  1052. static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
  1053. {
  1054. struct i40e_aqc_get_set_rss_key_data *rss_key =
  1055. (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
  1056. struct i40e_hw *hw = &adapter->hw;
  1057. int ret = 0;
  1058. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  1059. /* bail because we already have a command pending */
  1060. dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
  1061. adapter->current_op);
  1062. return -EBUSY;
  1063. }
  1064. ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
  1065. if (ret) {
  1066. dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
  1067. i40evf_stat_str(hw, ret),
  1068. i40evf_aq_str(hw, hw->aq.asq_last_status));
  1069. return ret;
  1070. }
  1071. ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
  1072. adapter->rss_lut, adapter->rss_lut_size);
  1073. if (ret) {
  1074. dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
  1075. i40evf_stat_str(hw, ret),
  1076. i40evf_aq_str(hw, hw->aq.asq_last_status));
  1077. }
  1078. return ret;
  1079. }
  1080. /**
  1081. * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
  1082. * @adapter: board private structure
  1083. *
  1084. * Returns 0 on success, negative on failure
  1085. **/
  1086. static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
  1087. {
  1088. struct i40e_hw *hw = &adapter->hw;
  1089. u32 *dw;
  1090. u16 i;
  1091. dw = (u32 *)adapter->rss_key;
  1092. for (i = 0; i <= adapter->rss_key_size / 4; i++)
  1093. wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
  1094. dw = (u32 *)adapter->rss_lut;
  1095. for (i = 0; i <= adapter->rss_lut_size / 4; i++)
  1096. wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
  1097. i40e_flush(hw);
  1098. return 0;
  1099. }
  1100. /**
  1101. * i40evf_config_rss - Configure RSS keys and lut
  1102. * @adapter: board private structure
  1103. *
  1104. * Returns 0 on success, negative on failure
  1105. **/
  1106. int i40evf_config_rss(struct i40evf_adapter *adapter)
  1107. {
  1108. if (RSS_PF(adapter)) {
  1109. adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
  1110. I40EVF_FLAG_AQ_SET_RSS_KEY;
  1111. return 0;
  1112. } else if (RSS_AQ(adapter)) {
  1113. return i40evf_config_rss_aq(adapter);
  1114. } else {
  1115. return i40evf_config_rss_reg(adapter);
  1116. }
  1117. }
  1118. /**
  1119. * i40evf_fill_rss_lut - Fill the lut with default values
  1120. * @adapter: board private structure
  1121. **/
  1122. static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
  1123. {
  1124. u16 i;
  1125. for (i = 0; i < adapter->rss_lut_size; i++)
  1126. adapter->rss_lut[i] = i % adapter->num_active_queues;
  1127. }
  1128. /**
  1129. * i40evf_init_rss - Prepare for RSS
  1130. * @adapter: board private structure
  1131. *
  1132. * Return 0 on success, negative on failure
  1133. **/
  1134. static int i40evf_init_rss(struct i40evf_adapter *adapter)
  1135. {
  1136. struct i40e_hw *hw = &adapter->hw;
  1137. int ret;
  1138. if (!RSS_PF(adapter)) {
  1139. /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
  1140. if (adapter->vf_res->vf_cap_flags &
  1141. VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
  1142. adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
  1143. else
  1144. adapter->hena = I40E_DEFAULT_RSS_HENA;
  1145. wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
  1146. wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
  1147. }
  1148. i40evf_fill_rss_lut(adapter);
  1149. netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
  1150. ret = i40evf_config_rss(adapter);
  1151. return ret;
  1152. }
  1153. /**
  1154. * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
  1155. * @adapter: board private structure to initialize
  1156. *
  1157. * We allocate one q_vector per queue interrupt. If allocation fails we
  1158. * return -ENOMEM.
  1159. **/
  1160. static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
  1161. {
  1162. int q_idx = 0, num_q_vectors;
  1163. struct i40e_q_vector *q_vector;
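	/* One MSI-X vector (NONQ_VECS) is reserved for the admin queue /
	 * misc interrupt; each remaining vector gets its own q_vector.
	 */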
  1164. num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  1165. adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
  1166. GFP_KERNEL);
  1167. if (!adapter->q_vectors)
  1168. return -ENOMEM;
  1169. for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
  1170. q_vector = &adapter->q_vectors[q_idx];
  1171. q_vector->adapter = adapter;
  1172. q_vector->vsi = &adapter->vsi;
  1173. q_vector->v_idx = q_idx;
  1174. q_vector->reg_idx = q_idx;
  1175. cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
  1176. netif_napi_add(adapter->netdev, &q_vector->napi,
  1177. i40evf_napi_poll, NAPI_POLL_WEIGHT);
  1178. }
  1179. return 0;
  1180. }
  1181. /**
  1182. * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
  1183. * @adapter: board private structure to initialize
  1184. *
  1185. * This function frees the memory allocated to the q_vectors. In addition if
  1186. * NAPI is enabled it will delete any references to the NAPI struct prior
  1187. * to freeing the q_vector.
  1188. **/
  1189. static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
  1190. {
  1191. int q_idx, num_q_vectors;
  1192. int napi_vectors;
  1193. if (!adapter->q_vectors)
  1194. return;
  1195. num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  1196. napi_vectors = adapter->num_active_queues;
  1197. for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
  1198. struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
  1199. if (q_idx < napi_vectors)
  1200. netif_napi_del(&q_vector->napi);
  1201. }
  1202. kfree(adapter->q_vectors);
  1203. adapter->q_vectors = NULL;
  1204. }
  1205. /**
  1206. * i40evf_reset_interrupt_capability - Reset MSIX setup
  1207. * @adapter: board private structure
  1208. *
  1209. **/
  1210. void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
  1211. {
  1212. if (!adapter->msix_entries)
  1213. return;
  1214. pci_disable_msix(adapter->pdev);
  1215. kfree(adapter->msix_entries);
  1216. adapter->msix_entries = NULL;
  1217. }
  1218. /**
  1219. * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
  1220. * @adapter: board private structure to initialize
  1221. *
  1222. **/
  1223. int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
  1224. {
  1225. int err;
  1226. err = i40evf_alloc_queues(adapter);
  1227. if (err) {
  1228. dev_err(&adapter->pdev->dev,
  1229. "Unable to allocate memory for queues\n");
  1230. goto err_alloc_queues;
  1231. }
  1232. rtnl_lock();
  1233. err = i40evf_set_interrupt_capability(adapter);
  1234. rtnl_unlock();
  1235. if (err) {
  1236. dev_err(&adapter->pdev->dev,
  1237. "Unable to setup interrupt capabilities\n");
  1238. goto err_set_interrupt;
  1239. }
  1240. err = i40evf_alloc_q_vectors(adapter);
  1241. if (err) {
  1242. dev_err(&adapter->pdev->dev,
  1243. "Unable to allocate memory for queue vectors\n");
  1244. goto err_alloc_q_vectors;
  1245. }
	/* If we've made it this far with the ADq flag set, then we haven't
	 * bailed out anywhere along the way, and ADq isn't just enabled but
	 * the actual resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
  1251. if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
  1252. adapter->num_tc)
  1253. dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
  1254. adapter->num_tc);
  1255. dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
  1256. (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
  1257. adapter->num_active_queues);
  1258. return 0;
  1259. err_alloc_q_vectors:
  1260. i40evf_reset_interrupt_capability(adapter);
  1261. err_set_interrupt:
  1262. i40evf_free_queues(adapter);
  1263. err_alloc_queues:
  1264. return err;
  1265. }
/**
 * i40evf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}
  1277. /**
  1278. * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
  1279. * @adapter: board private structure
  1280. *
  1281. * Returns 0 on success, negative on failure
  1282. **/
  1283. static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
  1284. {
  1285. struct net_device *netdev = adapter->netdev;
  1286. int err;
  1287. if (netif_running(netdev))
  1288. i40evf_free_traffic_irqs(adapter);
  1289. i40evf_free_misc_irq(adapter);
  1290. i40evf_reset_interrupt_capability(adapter);
  1291. i40evf_free_q_vectors(adapter);
  1292. i40evf_free_queues(adapter);
  1293. err = i40evf_init_interrupt_scheme(adapter);
  1294. if (err)
  1295. goto err;
  1296. netif_tx_stop_all_queues(netdev);
  1297. err = i40evf_request_misc_irq(adapter);
  1298. if (err)
  1299. goto err;
  1300. set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
  1301. i40evf_map_rings_to_vectors(adapter);
  1302. if (RSS_AQ(adapter))
  1303. adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
  1304. else
  1305. err = i40evf_init_rss(adapter);
  1306. err:
  1307. return err;
  1308. }
/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list embedded in the adapter
 **/
  1313. static void i40evf_watchdog_timer(struct timer_list *t)
  1314. {
  1315. struct i40evf_adapter *adapter = from_timer(adapter, t,
  1316. watchdog_timer);
  1317. schedule_work(&adapter->watchdog_task);
  1318. /* timer will be rescheduled in watchdog task */
  1319. }
  1320. /**
  1321. * i40evf_watchdog_task - Periodic call-back task
  1322. * @work: pointer to work_struct
  1323. **/
  1324. static void i40evf_watchdog_task(struct work_struct *work)
  1325. {
  1326. struct i40evf_adapter *adapter = container_of(work,
  1327. struct i40evf_adapter,
  1328. watchdog_task);
  1329. struct i40e_hw *hw = &adapter->hw;
  1330. u32 reg_val;
  1331. if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
  1332. goto restart_watchdog;
  1333. if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
  1334. reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
  1335. I40E_VFGEN_RSTAT_VFR_STATE_MASK;
  1336. if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
  1337. (reg_val == VIRTCHNL_VFR_COMPLETED)) {
  1338. /* A chance for redemption! */
  1339. dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
  1340. adapter->state = __I40EVF_STARTUP;
  1341. adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
  1342. schedule_delayed_work(&adapter->init_task, 10);
  1343. clear_bit(__I40EVF_IN_CRITICAL_TASK,
  1344. &adapter->crit_section);
  1345. /* Don't reschedule the watchdog, since we've restarted
  1346. * the init task. When init_task contacts the PF and
  1347. * gets everything set up again, it'll restart the
  1348. * watchdog for us. Down, boy. Sit. Stay. Woof.
  1349. */
  1350. return;
  1351. }
  1352. adapter->aq_required = 0;
  1353. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  1354. goto watchdog_done;
  1355. }
  1356. if ((adapter->state < __I40EVF_DOWN) ||
  1357. (adapter->flags & I40EVF_FLAG_RESET_PENDING))
  1358. goto watchdog_done;
  1359. /* check for reset */
  1360. reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
  1361. if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
  1362. adapter->state = __I40EVF_RESETTING;
  1363. adapter->flags |= I40EVF_FLAG_RESET_PENDING;
  1364. dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
  1365. schedule_work(&adapter->reset_task);
  1366. adapter->aq_required = 0;
  1367. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  1368. goto watchdog_done;
  1369. }
  1370. /* Process admin queue tasks. After init, everything gets done
  1371. * here so we don't race on the admin queue.
  1372. */
  1373. if (adapter->current_op) {
  1374. if (!i40evf_asq_done(hw)) {
  1375. dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
  1376. i40evf_send_api_ver(adapter);
  1377. }
  1378. goto watchdog_done;
  1379. }
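	/* Service at most one pending aq_required request per pass so that
	 * only a single virtchnl operation is ever outstanding to the PF.
	 */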
  1380. if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
  1381. i40evf_send_vf_config_msg(adapter);
  1382. goto watchdog_done;
  1383. }
  1384. if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
  1385. i40evf_disable_queues(adapter);
  1386. goto watchdog_done;
  1387. }
  1388. if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
  1389. i40evf_map_queues(adapter);
  1390. goto watchdog_done;
  1391. }
  1392. if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
  1393. i40evf_add_ether_addrs(adapter);
  1394. goto watchdog_done;
  1395. }
  1396. if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
  1397. i40evf_add_vlans(adapter);
  1398. goto watchdog_done;
  1399. }
  1400. if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
  1401. i40evf_del_ether_addrs(adapter);
  1402. goto watchdog_done;
  1403. }
  1404. if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
  1405. i40evf_del_vlans(adapter);
  1406. goto watchdog_done;
  1407. }
  1408. if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
  1409. i40evf_enable_vlan_stripping(adapter);
  1410. goto watchdog_done;
  1411. }
  1412. if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
  1413. i40evf_disable_vlan_stripping(adapter);
  1414. goto watchdog_done;
  1415. }
  1416. if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
  1417. i40evf_configure_queues(adapter);
  1418. goto watchdog_done;
  1419. }
  1420. if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
  1421. i40evf_enable_queues(adapter);
  1422. goto watchdog_done;
  1423. }
  1424. if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
  1425. /* This message goes straight to the firmware, not the
  1426. * PF, so we don't have to set current_op as we will
  1427. * not get a response through the ARQ.
  1428. */
  1429. i40evf_init_rss(adapter);
  1430. adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
  1431. goto watchdog_done;
  1432. }
  1433. if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
  1434. i40evf_get_hena(adapter);
  1435. goto watchdog_done;
  1436. }
  1437. if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
  1438. i40evf_set_hena(adapter);
  1439. goto watchdog_done;
  1440. }
  1441. if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
  1442. i40evf_set_rss_key(adapter);
  1443. goto watchdog_done;
  1444. }
  1445. if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
  1446. i40evf_set_rss_lut(adapter);
  1447. goto watchdog_done;
  1448. }
  1449. if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
  1450. i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
  1451. FLAG_VF_MULTICAST_PROMISC);
  1452. goto watchdog_done;
  1453. }
  1454. if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
  1455. i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
  1456. goto watchdog_done;
  1457. }
  1458. if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
  1459. (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
  1460. i40evf_set_promiscuous(adapter, 0);
  1461. goto watchdog_done;
  1462. }
  1463. if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
  1464. i40evf_enable_channels(adapter);
  1465. goto watchdog_done;
  1466. }
  1467. if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
  1468. i40evf_disable_channels(adapter);
  1469. goto watchdog_done;
  1470. }
  1471. if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
  1472. i40evf_add_cloud_filter(adapter);
  1473. goto watchdog_done;
  1474. }
  1475. if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
  1476. i40evf_del_cloud_filter(adapter);
  1477. goto watchdog_done;
  1478. }
  1479. schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
  1480. if (adapter->state == __I40EVF_RUNNING)
  1481. i40evf_request_stats(adapter);
  1482. watchdog_done:
  1483. if (adapter->state == __I40EVF_RUNNING)
  1484. i40evf_detect_recover_hung(&adapter->vsi);
  1485. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  1486. restart_watchdog:
  1487. if (adapter->state == __I40EVF_REMOVE)
  1488. return;
  1489. if (adapter->aq_required)
  1490. mod_timer(&adapter->watchdog_timer,
  1491. jiffies + msecs_to_jiffies(20));
  1492. else
  1493. mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
  1494. schedule_work(&adapter->adminq_task);
  1495. }
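/**
 * i40evf_disable_vf - give up on the VF after a failed reset
 * @adapter: board private structure
 *
 * Called from the reset task when the PF never signals that the reset
 * completed: bring the data path down, drop all MAC, VLAN and cloud
 * filters, and release interrupt and queue resources, leaving the
 * adapter in the DOWN state.
 **/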
  1496. static void i40evf_disable_vf(struct i40evf_adapter *adapter)
  1497. {
  1498. struct i40evf_mac_filter *f, *ftmp;
  1499. struct i40evf_vlan_filter *fv, *fvtmp;
  1500. struct i40evf_cloud_filter *cf, *cftmp;
  1501. adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
  1502. /* We don't use netif_running() because it may be true prior to
  1503. * ndo_open() returning, so we can't assume it means all our open
  1504. * tasks have finished, since we're not holding the rtnl_lock here.
  1505. */
  1506. if (adapter->state == __I40EVF_RUNNING) {
  1507. set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
  1508. netif_carrier_off(adapter->netdev);
  1509. netif_tx_disable(adapter->netdev);
  1510. adapter->link_up = false;
  1511. i40evf_napi_disable_all(adapter);
  1512. i40evf_irq_disable(adapter);
  1513. i40evf_free_traffic_irqs(adapter);
  1514. i40evf_free_all_tx_resources(adapter);
  1515. i40evf_free_all_rx_resources(adapter);
  1516. }
  1517. spin_lock_bh(&adapter->mac_vlan_list_lock);
  1518. /* Delete all of the filters */
  1519. list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
  1520. list_del(&f->list);
  1521. kfree(f);
  1522. }
  1523. list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
  1524. list_del(&fv->list);
  1525. kfree(fv);
  1526. }
  1527. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  1528. spin_lock_bh(&adapter->cloud_filter_list_lock);
  1529. list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
  1530. list_del(&cf->list);
  1531. kfree(cf);
  1532. adapter->num_cloud_filters--;
  1533. }
  1534. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  1535. i40evf_free_misc_irq(adapter);
  1536. i40evf_reset_interrupt_capability(adapter);
  1537. i40evf_free_queues(adapter);
  1538. i40evf_free_q_vectors(adapter);
  1539. kfree(adapter->vf_res);
  1540. i40evf_shutdown_adminq(&adapter->hw);
  1541. adapter->netdev->flags &= ~IFF_UP;
  1542. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  1543. adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
  1544. adapter->state = __I40EVF_DOWN;
  1545. wake_up(&adapter->down_waitqueue);
  1546. dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
  1547. }
  1548. #define I40EVF_RESET_WAIT_MS 10
  1549. #define I40EVF_RESET_WAIT_COUNT 500
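/* While waiting for the PF to signal that the VF reset has completed, the
 * reset task polls up to I40EVF_RESET_WAIT_COUNT times with
 * I40EVF_RESET_WAIT_MS between polls (roughly five seconds in total).
 */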
  1550. /**
  1551. * i40evf_reset_task - Call-back task to handle hardware reset
  1552. * @work: pointer to work_struct
  1553. *
  1554. * During reset we need to shut down and reinitialize the admin queue
  1555. * before we can use it to communicate with the PF again. We also clear
  1556. * and reinit the rings because that context is lost as well.
  1557. **/
  1558. static void i40evf_reset_task(struct work_struct *work)
  1559. {
  1560. struct i40evf_adapter *adapter = container_of(work,
  1561. struct i40evf_adapter,
  1562. reset_task);
  1563. struct virtchnl_vf_resource *vfres = adapter->vf_res;
  1564. struct net_device *netdev = adapter->netdev;
  1565. struct i40e_hw *hw = &adapter->hw;
  1566. struct i40evf_vlan_filter *vlf;
  1567. struct i40evf_cloud_filter *cf;
  1568. struct i40evf_mac_filter *f;
  1569. u32 reg_val;
  1570. int i = 0, err;
  1571. bool running;
  1572. /* When device is being removed it doesn't make sense to run the reset
  1573. * task, just return in such a case.
  1574. */
  1575. if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
  1576. return;
  1577. while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
  1578. &adapter->crit_section))
  1579. usleep_range(500, 1000);
  1580. if (CLIENT_ENABLED(adapter)) {
  1581. adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
  1582. I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
  1583. I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
  1584. I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
  1585. cancel_delayed_work_sync(&adapter->client_task);
  1586. i40evf_notify_client_close(&adapter->vsi, true);
  1587. }
  1588. i40evf_misc_irq_disable(adapter);
  1589. if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
  1590. adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
  1591. /* Restart the AQ here. If we have been reset but didn't
  1592. * detect it, or if the PF had to reinit, our AQ will be hosed.
  1593. */
  1594. i40evf_shutdown_adminq(hw);
  1595. i40evf_init_adminq(hw);
  1596. i40evf_request_reset(adapter);
  1597. }
  1598. adapter->flags |= I40EVF_FLAG_RESET_PENDING;
  1599. /* poll until we see the reset actually happen */
  1600. for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
  1601. reg_val = rd32(hw, I40E_VF_ARQLEN1) &
  1602. I40E_VF_ARQLEN1_ARQENABLE_MASK;
  1603. if (!reg_val)
  1604. break;
  1605. usleep_range(5000, 10000);
  1606. }
  1607. if (i == I40EVF_RESET_WAIT_COUNT) {
  1608. dev_info(&adapter->pdev->dev, "Never saw reset\n");
  1609. goto continue_reset; /* act like the reset happened */
  1610. }
  1611. /* wait until the reset is complete and the PF is responding to us */
  1612. for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
  1613. /* sleep first to make sure a minimum wait time is met */
  1614. msleep(I40EVF_RESET_WAIT_MS);
  1615. reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
  1616. I40E_VFGEN_RSTAT_VFR_STATE_MASK;
  1617. if (reg_val == VIRTCHNL_VFR_VFACTIVE)
  1618. break;
  1619. }
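	/* The VF reset may have cleared PCI bus mastering, so restore it
	 * before resuming access to the device.
	 */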
  1620. pci_set_master(adapter->pdev);
  1621. if (i == I40EVF_RESET_WAIT_COUNT) {
  1622. dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
  1623. reg_val);
  1624. i40evf_disable_vf(adapter);
  1625. clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
  1626. return; /* Do not attempt to reinit. It's dead, Jim. */
  1627. }
  1628. continue_reset:
  1629. /* We don't use netif_running() because it may be true prior to
  1630. * ndo_open() returning, so we can't assume it means all our open
  1631. * tasks have finished, since we're not holding the rtnl_lock here.
  1632. */
  1633. running = ((adapter->state == __I40EVF_RUNNING) ||
  1634. (adapter->state == __I40EVF_RESETTING));
  1635. if (running) {
  1636. netif_carrier_off(netdev);
  1637. netif_tx_stop_all_queues(netdev);
  1638. adapter->link_up = false;
  1639. i40evf_napi_disable_all(adapter);
  1640. }
  1641. i40evf_irq_disable(adapter);
  1642. adapter->state = __I40EVF_RESETTING;
  1643. adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
  1644. /* free the Tx/Rx rings and descriptors, might be better to just
  1645. * re-use them sometime in the future
  1646. */
  1647. i40evf_free_all_rx_resources(adapter);
  1648. i40evf_free_all_tx_resources(adapter);
  1649. adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
  1650. /* kill and reinit the admin queue */
  1651. i40evf_shutdown_adminq(hw);
  1652. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  1653. err = i40evf_init_adminq(hw);
  1654. if (err)
  1655. dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
  1656. err);
  1657. adapter->aq_required = 0;
  1658. if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
  1659. err = i40evf_reinit_interrupt_scheme(adapter);
  1660. if (err)
  1661. goto reset_err;
  1662. }
  1663. adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
  1664. adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
  1665. spin_lock_bh(&adapter->mac_vlan_list_lock);
  1666. /* re-add all MAC filters */
  1667. list_for_each_entry(f, &adapter->mac_filter_list, list) {
  1668. f->add = true;
  1669. }
  1670. /* re-add all VLAN filters */
  1671. list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
  1672. vlf->add = true;
  1673. }
  1674. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  1675. /* check if TCs are running and re-add all cloud filters */
  1676. spin_lock_bh(&adapter->cloud_filter_list_lock);
  1677. if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
  1678. adapter->num_tc) {
  1679. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  1680. cf->add = true;
  1681. }
  1682. }
  1683. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  1684. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
  1685. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
  1686. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
  1687. i40evf_misc_irq_enable(adapter);
  1688. mod_timer(&adapter->watchdog_timer, jiffies + 2);
  1689. /* We were running when the reset started, so we need to restore some
  1690. * state here.
  1691. */
  1692. if (running) {
  1693. /* allocate transmit descriptors */
  1694. err = i40evf_setup_all_tx_resources(adapter);
  1695. if (err)
  1696. goto reset_err;
  1697. /* allocate receive descriptors */
  1698. err = i40evf_setup_all_rx_resources(adapter);
  1699. if (err)
  1700. goto reset_err;
  1701. if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
  1702. err = i40evf_request_traffic_irqs(adapter,
  1703. netdev->name);
  1704. if (err)
  1705. goto reset_err;
  1706. adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
  1707. }
  1708. i40evf_configure(adapter);
  1709. i40evf_up_complete(adapter);
  1710. i40evf_irq_enable(adapter, true);
  1711. } else {
  1712. adapter->state = __I40EVF_DOWN;
  1713. wake_up(&adapter->down_waitqueue);
  1714. }
  1715. clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
  1716. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  1717. return;
  1718. reset_err:
  1719. clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
  1720. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  1721. dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
  1722. i40evf_close(netdev);
  1723. }
  1724. /**
  1725. * i40evf_adminq_task - worker thread to clean the admin queue
  1726. * @work: pointer to work_struct containing our data
  1727. **/
  1728. static void i40evf_adminq_task(struct work_struct *work)
  1729. {
  1730. struct i40evf_adapter *adapter =
  1731. container_of(work, struct i40evf_adapter, adminq_task);
  1732. struct i40e_hw *hw = &adapter->hw;
  1733. struct i40e_arq_event_info event;
  1734. enum virtchnl_ops v_op;
  1735. i40e_status ret, v_ret;
  1736. u32 val, oldval;
  1737. u16 pending;
  1738. if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
  1739. goto out;
  1740. event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
  1741. event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
  1742. if (!event.msg_buf)
  1743. goto out;
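	/* Drain the admin receive queue, dispatching each virtchnl message
	 * to the completion handler until no events remain.
	 */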
  1744. do {
  1745. ret = i40evf_clean_arq_element(hw, &event, &pending);
  1746. v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
  1747. v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
  1748. if (ret || !v_op)
  1749. break; /* No event to process or error cleaning ARQ */
  1750. i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
  1751. event.msg_len);
  1752. if (pending != 0)
  1753. memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
  1754. } while (pending);
  1755. if ((adapter->flags &
  1756. (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
  1757. adapter->state == __I40EVF_RESETTING)
  1758. goto freedom;
  1759. /* check for error indications */
  1760. val = rd32(hw, hw->aq.arq.len);
  1761. if (val == 0xdeadbeef) /* indicates device in reset */
  1762. goto freedom;
  1763. oldval = val;
  1764. if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
  1765. dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
  1766. val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
  1767. }
  1768. if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
  1769. dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
  1770. val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
  1771. }
  1772. if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
  1773. dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
  1774. val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
  1775. }
  1776. if (oldval != val)
  1777. wr32(hw, hw->aq.arq.len, val);
  1778. val = rd32(hw, hw->aq.asq.len);
  1779. oldval = val;
  1780. if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
  1781. dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
  1782. val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
  1783. }
  1784. if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
  1785. dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
  1786. val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
  1787. }
  1788. if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
  1789. dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
  1790. val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
  1791. }
  1792. if (oldval != val)
  1793. wr32(hw, hw->aq.asq.len, val);
  1794. freedom:
  1795. kfree(event.msg_buf);
  1796. out:
  1797. /* re-enable Admin queue interrupt cause */
  1798. i40evf_misc_irq_enable(adapter);
  1799. }
  1800. /**
  1801. * i40evf_client_task - worker thread to perform client work
  1802. * @work: pointer to work_struct containing our data
  1803. *
  1804. * This task handles client interactions. Because client calls can be
  1805. * reentrant, we can't handle them in the watchdog.
  1806. **/
  1807. static void i40evf_client_task(struct work_struct *work)
  1808. {
  1809. struct i40evf_adapter *adapter =
  1810. container_of(work, struct i40evf_adapter, client_task.work);
  1811. /* If we can't get the client bit, just give up. We'll be rescheduled
  1812. * later.
  1813. */
  1814. if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
  1815. return;
  1816. if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
  1817. i40evf_client_subtask(adapter);
  1818. adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
  1819. goto out;
  1820. }
  1821. if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
  1822. i40evf_notify_client_l2_params(&adapter->vsi);
  1823. adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
  1824. goto out;
  1825. }
  1826. if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
  1827. i40evf_notify_client_close(&adapter->vsi, false);
  1828. adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
  1829. goto out;
  1830. }
  1831. if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
  1832. i40evf_notify_client_open(&adapter->vsi);
  1833. adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
  1834. }
  1835. out:
  1836. clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
  1837. }
  1838. /**
  1839. * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
  1840. * @adapter: board private structure
  1841. *
  1842. * Free all transmit software resources
  1843. **/
  1844. void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
  1845. {
  1846. int i;
  1847. if (!adapter->tx_rings)
  1848. return;
  1849. for (i = 0; i < adapter->num_active_queues; i++)
  1850. if (adapter->tx_rings[i].desc)
  1851. i40evf_free_tx_resources(&adapter->tx_rings[i]);
  1852. }
  1853. /**
  1854. * i40evf_setup_all_tx_resources - allocate all queues Tx resources
  1855. * @adapter: board private structure
  1856. *
  1857. * If this function returns with an error, then it's possible one or
  1858. * more of the rings is populated (while the rest are not). It is the
* caller's duty to clean those orphaned rings.
  1860. *
  1861. * Return 0 on success, negative on failure
  1862. **/
  1863. static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
  1864. {
  1865. int i, err = 0;
  1866. for (i = 0; i < adapter->num_active_queues; i++) {
  1867. adapter->tx_rings[i].count = adapter->tx_desc_count;
  1868. err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
  1869. if (!err)
  1870. continue;
  1871. dev_err(&adapter->pdev->dev,
  1872. "Allocation for Tx Queue %u failed\n", i);
  1873. break;
  1874. }
  1875. return err;
  1876. }
  1877. /**
  1878. * i40evf_setup_all_rx_resources - allocate all queues Rx resources
  1879. * @adapter: board private structure
  1880. *
  1881. * If this function returns with an error, then it's possible one or
  1882. * more of the rings is populated (while the rest are not). It is the
* caller's duty to clean those orphaned rings.
  1884. *
  1885. * Return 0 on success, negative on failure
  1886. **/
  1887. static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
  1888. {
  1889. int i, err = 0;
  1890. for (i = 0; i < adapter->num_active_queues; i++) {
  1891. adapter->rx_rings[i].count = adapter->rx_desc_count;
  1892. err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
  1893. if (!err)
  1894. continue;
  1895. dev_err(&adapter->pdev->dev,
  1896. "Allocation for Rx Queue %u failed\n", i);
  1897. break;
  1898. }
  1899. return err;
  1900. }
  1901. /**
  1902. * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
  1903. * @adapter: board private structure
  1904. *
  1905. * Free all receive software resources
  1906. **/
  1907. void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
  1908. {
  1909. int i;
  1910. if (!adapter->rx_rings)
  1911. return;
  1912. for (i = 0; i < adapter->num_active_queues; i++)
  1913. if (adapter->rx_rings[i].desc)
  1914. i40evf_free_rx_resources(&adapter->rx_rings[i]);
  1915. }
  1916. /**
  1917. * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
  1918. * @adapter: board private structure
  1919. * @max_tx_rate: max Tx bw for a tc
  1920. **/
  1921. static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
  1922. u64 max_tx_rate)
  1923. {
  1924. int speed = 0, ret = 0;
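	/* Convert the negotiated link speed to Mbps so it can be compared
	 * with the requested rate, which is also in Mbps.
	 */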
  1925. switch (adapter->link_speed) {
  1926. case I40E_LINK_SPEED_40GB:
  1927. speed = 40000;
  1928. break;
  1929. case I40E_LINK_SPEED_25GB:
  1930. speed = 25000;
  1931. break;
  1932. case I40E_LINK_SPEED_20GB:
  1933. speed = 20000;
  1934. break;
  1935. case I40E_LINK_SPEED_10GB:
  1936. speed = 10000;
  1937. break;
  1938. case I40E_LINK_SPEED_1GB:
  1939. speed = 1000;
  1940. break;
  1941. case I40E_LINK_SPEED_100MB:
  1942. speed = 100;
  1943. break;
  1944. default:
  1945. break;
  1946. }
  1947. if (max_tx_rate > speed) {
  1948. dev_err(&adapter->pdev->dev,
  1949. "Invalid tx rate specified\n");
  1950. ret = -EINVAL;
  1951. }
  1952. return ret;
  1953. }
  1954. /**
  1955. * i40evf_validate_channel_config - validate queue mapping info
  1956. * @adapter: board private structure
  1957. * @mqprio_qopt: queue parameters
  1958. *
* This function checks whether the queue channel configuration provided
* by the user is valid. Returns 0 on a valid config.
  1962. **/
  1963. static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
  1964. struct tc_mqprio_qopt_offload *mqprio_qopt)
  1965. {
  1966. u64 total_max_rate = 0;
  1967. int i, num_qps = 0;
  1968. u64 tx_rate = 0;
  1969. int ret = 0;
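	/* Each TC needs at least one queue, the per-TC queue ranges must be
	 * contiguous starting at queue 0, and minimum rates are not
	 * supported.
	 */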
  1970. if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
  1971. mqprio_qopt->qopt.num_tc < 1)
  1972. return -EINVAL;
  1973. for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
  1974. if (!mqprio_qopt->qopt.count[i] ||
  1975. mqprio_qopt->qopt.offset[i] != num_qps)
  1976. return -EINVAL;
  1977. if (mqprio_qopt->min_rate[i]) {
  1978. dev_err(&adapter->pdev->dev,
  1979. "Invalid min tx rate (greater than 0) specified\n");
  1980. return -EINVAL;
  1981. }
/* convert to Mbps */
  1983. tx_rate = div_u64(mqprio_qopt->max_rate[i],
  1984. I40EVF_MBPS_DIVISOR);
  1985. total_max_rate += tx_rate;
  1986. num_qps += mqprio_qopt->qopt.count[i];
  1987. }
  1988. if (num_qps > I40EVF_MAX_REQ_QUEUES)
  1989. return -EINVAL;
  1990. ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
  1991. return ret;
  1992. }
/**
 * i40evf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
  1997. static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
  1998. {
  1999. struct i40evf_cloud_filter *cf, *cftmp;
  2000. spin_lock_bh(&adapter->cloud_filter_list_lock);
  2001. list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
  2002. list) {
  2003. list_del(&cf->list);
  2004. kfree(cf);
  2005. adapter->num_cloud_filters--;
  2006. }
  2007. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  2008. }
  2009. /**
  2010. * __i40evf_setup_tc - configure multiple traffic classes
  2011. * @netdev: network interface device structure
* @type_data: tc offload data
  2013. *
  2014. * This function processes the config information provided by the
  2015. * user to configure traffic classes/queue channels and packages the
  2016. * information to request the PF to setup traffic classes.
  2017. *
  2018. * Returns 0 on success.
  2019. **/
  2020. static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
  2021. {
  2022. struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
  2023. struct i40evf_adapter *adapter = netdev_priv(netdev);
  2024. struct virtchnl_vf_resource *vfres = adapter->vf_res;
  2025. u8 num_tc = 0, total_qps = 0;
  2026. int ret = 0, netdev_tc = 0;
  2027. u64 max_tx_rate;
  2028. u16 mode;
  2029. int i;
  2030. num_tc = mqprio_qopt->qopt.num_tc;
  2031. mode = mqprio_qopt->mode;
  2032. /* delete queue_channel */
  2033. if (!mqprio_qopt->qopt.hw) {
  2034. if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
  2035. /* reset the tc configuration */
  2036. netdev_reset_tc(netdev);
  2037. adapter->num_tc = 0;
  2038. netif_tx_stop_all_queues(netdev);
  2039. netif_tx_disable(netdev);
  2040. i40evf_del_all_cloud_filters(adapter);
  2041. adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
  2042. goto exit;
  2043. } else {
  2044. return -EINVAL;
  2045. }
  2046. }
  2047. /* add queue channel */
  2048. if (mode == TC_MQPRIO_MODE_CHANNEL) {
  2049. if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
  2050. dev_err(&adapter->pdev->dev, "ADq not supported\n");
  2051. return -EOPNOTSUPP;
  2052. }
  2053. if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
  2054. dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
  2055. return -EINVAL;
  2056. }
  2057. ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
  2058. if (ret)
  2059. return ret;
  2060. /* Return if same TC config is requested */
  2061. if (adapter->num_tc == num_tc)
  2062. return 0;
  2063. adapter->num_tc = num_tc;
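	/* Record the per-TC queue count, offset and max Tx rate; TCs beyond
	 * num_tc default to a single queue at offset 0.
	 */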
  2064. for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
  2065. if (i < num_tc) {
  2066. adapter->ch_config.ch_info[i].count =
  2067. mqprio_qopt->qopt.count[i];
  2068. adapter->ch_config.ch_info[i].offset =
  2069. mqprio_qopt->qopt.offset[i];
  2070. total_qps += mqprio_qopt->qopt.count[i];
  2071. max_tx_rate = mqprio_qopt->max_rate[i];
  2072. /* convert to Mbps */
  2073. max_tx_rate = div_u64(max_tx_rate,
  2074. I40EVF_MBPS_DIVISOR);
  2075. adapter->ch_config.ch_info[i].max_tx_rate =
  2076. max_tx_rate;
  2077. } else {
  2078. adapter->ch_config.ch_info[i].count = 1;
  2079. adapter->ch_config.ch_info[i].offset = 0;
  2080. }
  2081. }
  2082. adapter->ch_config.total_qps = total_qps;
  2083. netif_tx_stop_all_queues(netdev);
  2084. netif_tx_disable(netdev);
  2085. adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
  2086. netdev_reset_tc(netdev);
  2087. /* Report the tc mapping up the stack */
  2088. netdev_set_num_tc(adapter->netdev, num_tc);
  2089. for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
  2090. u16 qcount = mqprio_qopt->qopt.count[i];
  2091. u16 qoffset = mqprio_qopt->qopt.offset[i];
  2092. if (i < num_tc)
  2093. netdev_set_tc_queue(netdev, netdev_tc++, qcount,
  2094. qoffset);
  2095. }
  2096. }
  2097. exit:
  2098. return ret;
  2099. }
  2100. /**
  2101. * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
  2102. * @adapter: board private structure
* @f: pointer to struct tc_cls_flower_offload
  2104. * @filter: pointer to cloud filter structure
  2105. */
  2106. static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
  2107. struct tc_cls_flower_offload *f,
  2108. struct i40evf_cloud_filter *filter)
  2109. {
  2110. u16 n_proto_mask = 0;
  2111. u16 n_proto_key = 0;
  2112. u8 field_flags = 0;
  2113. u16 addr_type = 0;
  2114. u16 n_proto = 0;
  2115. int i = 0;
  2116. struct virtchnl_filter *vf = &filter->f;
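	/* Reject the filter if it matches on any dissector keys that cannot
	 * be offloaded to the device.
	 */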
  2117. if (f->dissector->used_keys &
  2118. ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
  2119. BIT(FLOW_DISSECTOR_KEY_BASIC) |
  2120. BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
  2121. BIT(FLOW_DISSECTOR_KEY_VLAN) |
  2122. BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
  2123. BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
  2124. BIT(FLOW_DISSECTOR_KEY_PORTS) |
  2125. BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
  2126. dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
  2127. f->dissector->used_keys);
  2128. return -EOPNOTSUPP;
  2129. }
  2130. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
  2131. struct flow_dissector_key_keyid *mask =
  2132. skb_flow_dissector_target(f->dissector,
  2133. FLOW_DISSECTOR_KEY_ENC_KEYID,
  2134. f->mask);
  2135. if (mask->keyid != 0)
  2136. field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
  2137. }
  2138. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
  2139. struct flow_dissector_key_basic *key =
  2140. skb_flow_dissector_target(f->dissector,
  2141. FLOW_DISSECTOR_KEY_BASIC,
  2142. f->key);
  2143. struct flow_dissector_key_basic *mask =
  2144. skb_flow_dissector_target(f->dissector,
  2145. FLOW_DISSECTOR_KEY_BASIC,
  2146. f->mask);
  2147. n_proto_key = ntohs(key->n_proto);
  2148. n_proto_mask = ntohs(mask->n_proto);
  2149. if (n_proto_key == ETH_P_ALL) {
  2150. n_proto_key = 0;
  2151. n_proto_mask = 0;
  2152. }
  2153. n_proto = n_proto_key & n_proto_mask;
  2154. if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
  2155. return -EINVAL;
  2156. if (n_proto == ETH_P_IPV6) {
  2157. /* specify flow type as TCP IPv6 */
  2158. vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
  2159. }
  2160. if (key->ip_proto != IPPROTO_TCP) {
  2161. dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
  2162. return -EINVAL;
  2163. }
  2164. }
  2165. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
  2166. struct flow_dissector_key_eth_addrs *key =
  2167. skb_flow_dissector_target(f->dissector,
  2168. FLOW_DISSECTOR_KEY_ETH_ADDRS,
  2169. f->key);
  2170. struct flow_dissector_key_eth_addrs *mask =
  2171. skb_flow_dissector_target(f->dissector,
  2172. FLOW_DISSECTOR_KEY_ETH_ADDRS,
  2173. f->mask);
/* use is_broadcast and is_zero to check for all 0xff or all 0 */
  2175. if (!is_zero_ether_addr(mask->dst)) {
  2176. if (is_broadcast_ether_addr(mask->dst)) {
  2177. field_flags |= I40EVF_CLOUD_FIELD_OMAC;
  2178. } else {
  2179. dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
  2180. mask->dst);
  2181. return I40E_ERR_CONFIG;
  2182. }
  2183. }
  2184. if (!is_zero_ether_addr(mask->src)) {
  2185. if (is_broadcast_ether_addr(mask->src)) {
  2186. field_flags |= I40EVF_CLOUD_FIELD_IMAC;
  2187. } else {
  2188. dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
  2189. mask->src);
  2190. return I40E_ERR_CONFIG;
  2191. }
  2192. }
  2193. if (!is_zero_ether_addr(key->dst))
  2194. if (is_valid_ether_addr(key->dst) ||
  2195. is_multicast_ether_addr(key->dst)) {
  2196. /* set the mask if a valid dst_mac address */
  2197. for (i = 0; i < ETH_ALEN; i++)
  2198. vf->mask.tcp_spec.dst_mac[i] |= 0xff;
  2199. ether_addr_copy(vf->data.tcp_spec.dst_mac,
  2200. key->dst);
  2201. }
  2202. if (!is_zero_ether_addr(key->src))
  2203. if (is_valid_ether_addr(key->src) ||
  2204. is_multicast_ether_addr(key->src)) {
/* set the mask if a valid src_mac address */
  2206. for (i = 0; i < ETH_ALEN; i++)
  2207. vf->mask.tcp_spec.src_mac[i] |= 0xff;
  2208. ether_addr_copy(vf->data.tcp_spec.src_mac,
  2209. key->src);
  2210. }
  2211. }
  2212. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
  2213. struct flow_dissector_key_vlan *key =
  2214. skb_flow_dissector_target(f->dissector,
  2215. FLOW_DISSECTOR_KEY_VLAN,
  2216. f->key);
  2217. struct flow_dissector_key_vlan *mask =
  2218. skb_flow_dissector_target(f->dissector,
  2219. FLOW_DISSECTOR_KEY_VLAN,
  2220. f->mask);
  2221. if (mask->vlan_id) {
  2222. if (mask->vlan_id == VLAN_VID_MASK) {
  2223. field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
  2224. } else {
  2225. dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
  2226. mask->vlan_id);
  2227. return I40E_ERR_CONFIG;
  2228. }
  2229. }
  2230. vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
  2231. vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
  2232. }
  2233. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
  2234. struct flow_dissector_key_control *key =
  2235. skb_flow_dissector_target(f->dissector,
  2236. FLOW_DISSECTOR_KEY_CONTROL,
  2237. f->key);
  2238. addr_type = key->addr_type;
  2239. }
  2240. if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
  2241. struct flow_dissector_key_ipv4_addrs *key =
  2242. skb_flow_dissector_target(f->dissector,
  2243. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  2244. f->key);
  2245. struct flow_dissector_key_ipv4_addrs *mask =
  2246. skb_flow_dissector_target(f->dissector,
  2247. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  2248. f->mask);
  2249. if (mask->dst) {
  2250. if (mask->dst == cpu_to_be32(0xffffffff)) {
  2251. field_flags |= I40EVF_CLOUD_FIELD_IIP;
  2252. } else {
  2253. dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
  2254. be32_to_cpu(mask->dst));
  2255. return I40E_ERR_CONFIG;
  2256. }
  2257. }
  2258. if (mask->src) {
  2259. if (mask->src == cpu_to_be32(0xffffffff)) {
  2260. field_flags |= I40EVF_CLOUD_FIELD_IIP;
  2261. } else {
  2262. dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
  2263. be32_to_cpu(mask->dst));
  2264. return I40E_ERR_CONFIG;
  2265. }
  2266. }
  2267. if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
  2268. dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
  2269. return I40E_ERR_CONFIG;
  2270. }
  2271. if (key->dst) {
  2272. vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
  2273. vf->data.tcp_spec.dst_ip[0] = key->dst;
  2274. }
  2275. if (key->src) {
  2276. vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
  2277. vf->data.tcp_spec.src_ip[0] = key->src;
  2278. }
  2279. }
  2280. if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
  2281. struct flow_dissector_key_ipv6_addrs *key =
  2282. skb_flow_dissector_target(f->dissector,
  2283. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  2284. f->key);
  2285. struct flow_dissector_key_ipv6_addrs *mask =
  2286. skb_flow_dissector_target(f->dissector,
  2287. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  2288. f->mask);
  2289. /* validate mask, make sure it is not IPV6_ADDR_ANY */
  2290. if (ipv6_addr_any(&mask->dst)) {
  2291. dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
  2292. IPV6_ADDR_ANY);
  2293. return I40E_ERR_CONFIG;
  2294. }
  2295. /* src and dest IPv6 address should not be LOOPBACK
  2296. * (0:0:0:0:0:0:0:1) which can be represented as ::1
  2297. */
  2298. if (ipv6_addr_loopback(&key->dst) ||
  2299. ipv6_addr_loopback(&key->src)) {
  2300. dev_err(&adapter->pdev->dev,
  2301. "ipv6 addr should not be loopback\n");
  2302. return I40E_ERR_CONFIG;
  2303. }
  2304. if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
  2305. field_flags |= I40EVF_CLOUD_FIELD_IIP;
  2306. for (i = 0; i < 4; i++)
  2307. vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
  2308. memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
  2309. sizeof(vf->data.tcp_spec.dst_ip));
  2310. for (i = 0; i < 4; i++)
  2311. vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
  2312. memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
  2313. sizeof(vf->data.tcp_spec.src_ip));
  2314. }
  2315. if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
  2316. struct flow_dissector_key_ports *key =
  2317. skb_flow_dissector_target(f->dissector,
  2318. FLOW_DISSECTOR_KEY_PORTS,
  2319. f->key);
  2320. struct flow_dissector_key_ports *mask =
  2321. skb_flow_dissector_target(f->dissector,
  2322. FLOW_DISSECTOR_KEY_PORTS,
  2323. f->mask);
  2324. if (mask->src) {
  2325. if (mask->src == cpu_to_be16(0xffff)) {
  2326. field_flags |= I40EVF_CLOUD_FIELD_IIP;
  2327. } else {
  2328. dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
  2329. be16_to_cpu(mask->src));
  2330. return I40E_ERR_CONFIG;
  2331. }
  2332. }
  2333. if (mask->dst) {
  2334. if (mask->dst == cpu_to_be16(0xffff)) {
  2335. field_flags |= I40EVF_CLOUD_FIELD_IIP;
  2336. } else {
  2337. dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
  2338. be16_to_cpu(mask->dst));
  2339. return I40E_ERR_CONFIG;
  2340. }
  2341. }
  2342. if (key->dst) {
  2343. vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
  2344. vf->data.tcp_spec.dst_port = key->dst;
  2345. }
  2346. if (key->src) {
  2347. vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
  2348. vf->data.tcp_spec.src_port = key->src;
  2349. }
  2350. }
  2351. vf->field_flags = field_flags;
  2352. return 0;
  2353. }
  2354. /**
  2355. * i40evf_handle_tclass - Forward to a traffic class on the device
  2356. * @adapter: board private structure
  2357. * @tc: traffic class index on the device
  2358. * @filter: pointer to cloud filter structure
  2359. */
  2360. static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
  2361. struct i40evf_cloud_filter *filter)
  2362. {
  2363. if (tc == 0)
  2364. return 0;
  2365. if (tc < adapter->num_tc) {
  2366. if (!filter->f.data.tcp_spec.dst_port) {
  2367. dev_err(&adapter->pdev->dev,
  2368. "Specify destination port to redirect to traffic class other than TC0\n");
  2369. return -EINVAL;
  2370. }
  2371. }
  2372. /* redirect to a traffic class on the same device */
  2373. filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
  2374. filter->f.action_meta = tc;
  2375. return 0;
  2376. }
  2377. /**
  2378. * i40evf_configure_clsflower - Add tc flower filters
  2379. * @adapter: board private structure
  2380. * @cls_flower: Pointer to struct tc_cls_flower_offload
  2381. */
  2382. static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
  2383. struct tc_cls_flower_offload *cls_flower)
  2384. {
  2385. int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
  2386. struct i40evf_cloud_filter *filter = NULL;
  2387. int err = -EINVAL, count = 50;
  2388. if (tc < 0) {
  2389. dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
  2390. return -EINVAL;
  2391. }
  2392. filter = kzalloc(sizeof(*filter), GFP_KERNEL);
  2393. if (!filter)
  2394. return -ENOMEM;
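	/* Try for up to ~50 usecs to take the critical section bit; give up
	 * if another task holds it for longer than that.
	 */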
  2395. while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
  2396. &adapter->crit_section)) {
  2397. if (--count == 0)
  2398. goto err;
  2399. udelay(1);
  2400. }
  2401. filter->cookie = cls_flower->cookie;
  2402. /* set the mask to all zeroes to begin with */
  2403. memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
  2404. /* start out with flow type and eth type IPv4 to begin with */
  2405. filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
  2406. err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
  2407. if (err < 0)
  2408. goto err;
  2409. err = i40evf_handle_tclass(adapter, tc, filter);
  2410. if (err < 0)
  2411. goto err;
  2412. /* add filter to the list */
  2413. spin_lock_bh(&adapter->cloud_filter_list_lock);
  2414. list_add_tail(&filter->list, &adapter->cloud_filter_list);
  2415. adapter->num_cloud_filters++;
  2416. filter->add = true;
  2417. adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
  2418. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  2419. err:
  2420. if (err)
  2421. kfree(filter);
  2422. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  2423. return err;
  2424. }
  2425. /* i40evf_find_cf - Find the cloud filter in the list
  2426. * @adapter: Board private structure
  2427. * @cookie: filter specific cookie
  2428. *
  2429. * Returns ptr to the filter object or NULL. Must be called while holding the
  2430. * cloud_filter_list_lock.
  2431. */
  2432. static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
  2433. unsigned long *cookie)
  2434. {
  2435. struct i40evf_cloud_filter *filter = NULL;
  2436. if (!cookie)
  2437. return NULL;
  2438. list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
  2439. if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
  2440. return filter;
  2441. }
  2442. return NULL;
  2443. }
  2444. /**
  2445. * i40evf_delete_clsflower - Remove tc flower filters
  2446. * @adapter: board private structure
  2447. * @cls_flower: Pointer to struct tc_cls_flower_offload
  2448. */
  2449. static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
  2450. struct tc_cls_flower_offload *cls_flower)
  2451. {
  2452. struct i40evf_cloud_filter *filter = NULL;
  2453. int err = 0;
  2454. spin_lock_bh(&adapter->cloud_filter_list_lock);
  2455. filter = i40evf_find_cf(adapter, &cls_flower->cookie);
  2456. if (filter) {
  2457. filter->del = true;
  2458. adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
  2459. } else {
  2460. err = -EINVAL;
  2461. }
  2462. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  2463. return err;
  2464. }
  2465. /**
  2466. * i40evf_setup_tc_cls_flower - flower classifier offloads
* @adapter: board private structure
* @cls_flower: pointer to struct tc_cls_flower_offload
  2469. */
  2470. static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
  2471. struct tc_cls_flower_offload *cls_flower)
  2472. {
  2473. if (cls_flower->common.chain_index)
  2474. return -EOPNOTSUPP;
  2475. switch (cls_flower->command) {
  2476. case TC_CLSFLOWER_REPLACE:
  2477. return i40evf_configure_clsflower(adapter, cls_flower);
  2478. case TC_CLSFLOWER_DESTROY:
  2479. return i40evf_delete_clsflower(adapter, cls_flower);
  2480. case TC_CLSFLOWER_STATS:
  2481. return -EOPNOTSUPP;
  2482. default:
  2483. return -EOPNOTSUPP;
  2484. }
  2485. }
  2486. /**
  2487. * i40evf_setup_tc_block_cb - block callback for tc
  2488. * @type: type of offload
  2489. * @type_data: offload data
* @cb_priv: board private structure, supplied when the callback was registered
  2491. *
  2492. * This function is the block callback for traffic classes
  2493. **/
  2494. static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
  2495. void *cb_priv)
  2496. {
  2497. switch (type) {
  2498. case TC_SETUP_CLSFLOWER:
  2499. return i40evf_setup_tc_cls_flower(cb_priv, type_data);
  2500. default:
  2501. return -EOPNOTSUPP;
  2502. }
  2503. }
  2504. /**
  2505. * i40evf_setup_tc_block - register callbacks for tc
* @dev: network interface device structure
  2507. * @f: tc offload data
  2508. *
  2509. * This function registers block callbacks for tc
  2510. * offloads
  2511. **/
  2512. static int i40evf_setup_tc_block(struct net_device *dev,
  2513. struct tc_block_offload *f)
  2514. {
  2515. struct i40evf_adapter *adapter = netdev_priv(dev);
  2516. if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
  2517. return -EOPNOTSUPP;
  2518. switch (f->command) {
  2519. case TC_BLOCK_BIND:
  2520. return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
  2521. adapter, adapter, f->extack);
  2522. case TC_BLOCK_UNBIND:
  2523. tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
  2524. adapter);
  2525. return 0;
  2526. default:
  2527. return -EOPNOTSUPP;
  2528. }
  2529. }
  2530. /**
  2531. * i40evf_setup_tc - configure multiple traffic classes
  2532. * @netdev: network interface device structure
  2533. * @type: type of offload
* @type_data: tc offload data
  2535. *
  2536. * This function is the callback to ndo_setup_tc in the
  2537. * netdev_ops.
  2538. *
  2539. * Returns 0 on success
  2540. **/
  2541. static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
  2542. void *type_data)
  2543. {
  2544. switch (type) {
  2545. case TC_SETUP_QDISC_MQPRIO:
  2546. return __i40evf_setup_tc(netdev, type_data);
  2547. case TC_SETUP_BLOCK:
  2548. return i40evf_setup_tc_block(netdev, type_data);
  2549. default:
  2550. return -EOPNOTSUPP;
  2551. }
  2552. }
  2553. /**
  2554. * i40evf_open - Called when a network interface is made active
  2555. * @netdev: network interface device structure
  2556. *
  2557. * Returns 0 on success, negative value on failure
  2558. *
  2559. * The open entry point is called when a network interface is made
  2560. * active by the system (IFF_UP). At this point all resources needed
  2561. * for transmit and receive operations are allocated, the interrupt
  2562. * handler is registered with the OS, the watchdog timer is started,
  2563. * and the stack is notified that the interface is ready.
  2564. **/
  2565. static int i40evf_open(struct net_device *netdev)
  2566. {
  2567. struct i40evf_adapter *adapter = netdev_priv(netdev);
  2568. int err;
  2569. if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
  2570. dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
  2571. return -EIO;
  2572. }
  2573. while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
  2574. &adapter->crit_section))
  2575. usleep_range(500, 1000);
  2576. if (adapter->state != __I40EVF_DOWN) {
  2577. err = -EBUSY;
  2578. goto err_unlock;
  2579. }
  2580. /* allocate transmit descriptors */
  2581. err = i40evf_setup_all_tx_resources(adapter);
  2582. if (err)
  2583. goto err_setup_tx;
  2584. /* allocate receive descriptors */
  2585. err = i40evf_setup_all_rx_resources(adapter);
  2586. if (err)
  2587. goto err_setup_rx;
  2588. /* clear any pending interrupts, may auto mask */
  2589. err = i40evf_request_traffic_irqs(adapter, netdev->name);
  2590. if (err)
  2591. goto err_req_irq;
  2592. spin_lock_bh(&adapter->mac_vlan_list_lock);
  2593. i40evf_add_filter(adapter, adapter->hw.mac.addr);
  2594. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  2595. i40evf_configure(adapter);
  2596. i40evf_up_complete(adapter);
  2597. i40evf_irq_enable(adapter, true);
  2598. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  2599. return 0;
  2600. err_req_irq:
  2601. i40evf_down(adapter);
  2602. i40evf_free_traffic_irqs(adapter);
  2603. err_setup_rx:
  2604. i40evf_free_all_rx_resources(adapter);
  2605. err_setup_tx:
  2606. i40evf_free_all_tx_resources(adapter);
  2607. err_unlock:
  2608. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  2609. return err;
  2610. }
  2611. /**
  2612. * i40evf_close - Disables a network interface
  2613. * @netdev: network interface device structure
  2614. *
  2615. * Returns 0, this is not allowed to fail
  2616. *
  2617. * The close entry point is called when an interface is de-activated
2618. * by the OS. The hardware is still under the driver's control, but
  2619. * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
  2620. * are freed, along with all transmit and receive resources.
  2621. **/
  2622. static int i40evf_close(struct net_device *netdev)
  2623. {
  2624. struct i40evf_adapter *adapter = netdev_priv(netdev);
  2625. int status;
  2626. if (adapter->state <= __I40EVF_DOWN_PENDING)
  2627. return 0;
  2628. while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
  2629. &adapter->crit_section))
  2630. usleep_range(500, 1000);
  2631. set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
  2632. if (CLIENT_ENABLED(adapter))
  2633. adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
  2634. i40evf_down(adapter);
  2635. adapter->state = __I40EVF_DOWN_PENDING;
  2636. i40evf_free_traffic_irqs(adapter);
  2637. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  2638. /* We explicitly don't free resources here because the hardware is
  2639. * still active and can DMA into memory. Resources are cleared in
  2640. * i40evf_virtchnl_completion() after we get confirmation from the PF
  2641. * driver that the rings have been stopped.
  2642. *
  2643. * Also, we wait for state to transition to __I40EVF_DOWN before
  2644. * returning. State change occurs in i40evf_virtchnl_completion() after
  2645. * VF resources are released (which occurs after PF driver processes and
  2646. * responds to admin queue commands).
  2647. */
  2648. status = wait_event_timeout(adapter->down_waitqueue,
  2649. adapter->state == __I40EVF_DOWN,
  2650. msecs_to_jiffies(200));
  2651. if (!status)
  2652. netdev_warn(netdev, "Device resources not yet released\n");
  2653. return 0;
  2654. }
  2655. /**
  2656. * i40evf_change_mtu - Change the Maximum Transfer Unit
  2657. * @netdev: network interface device structure
  2658. * @new_mtu: new value for maximum frame size
  2659. *
  2660. * Returns 0 on success, negative on failure
  2661. **/
  2662. static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
  2663. {
  2664. struct i40evf_adapter *adapter = netdev_priv(netdev);
  2665. netdev->mtu = new_mtu;
  2666. if (CLIENT_ENABLED(adapter)) {
  2667. i40evf_notify_client_l2_params(&adapter->vsi);
  2668. adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
  2669. }
  2670. adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
  2671. schedule_work(&adapter->reset_task);
  2672. return 0;
  2673. }
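/* Note on the flow above: the new MTU is stored right away, but it only
 * takes full effect once the scheduled reset has rebuilt the rings,
 * since Rx buffer sizing is re-derived during that reconfiguration.
 */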
  2674. /**
2675. * i40evf_set_features - set the netdev feature flags
  2676. * @netdev: ptr to the netdev being adjusted
  2677. * @features: the feature set that the stack is suggesting
  2678. * Note: expects to be called while under rtnl_lock()
  2679. **/
  2680. static int i40evf_set_features(struct net_device *netdev,
  2681. netdev_features_t features)
  2682. {
  2683. struct i40evf_adapter *adapter = netdev_priv(netdev);
2684. /* Don't allow changing the VLAN_RX flag unless the VF has been granted
2685. * the VLAN offload capability; return an error in that case
  2686. */
  2687. if (VLAN_ALLOWED(adapter)) {
  2688. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  2689. adapter->aq_required |=
  2690. I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
  2691. else
  2692. adapter->aq_required |=
  2693. I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
  2694. } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
  2695. return -EINVAL;
  2696. }
  2697. return 0;
  2698. }
  2699. /**
  2700. * i40evf_features_check - Validate encapsulated packet conforms to limits
  2701. * @skb: skb buff
  2702. * @dev: This physical port's netdev
  2703. * @features: Offload features that the stack believes apply
  2704. **/
  2705. static netdev_features_t i40evf_features_check(struct sk_buff *skb,
  2706. struct net_device *dev,
  2707. netdev_features_t features)
  2708. {
  2709. size_t len;
2710. /* No point in doing any of this if neither checksum nor GSO is
  2711. * being requested for this frame. We can rule out both by just
  2712. * checking for CHECKSUM_PARTIAL
  2713. */
  2714. if (skb->ip_summed != CHECKSUM_PARTIAL)
  2715. return features;
  2716. /* We cannot support GSO if the MSS is going to be less than
  2717. * 64 bytes. If it is then we need to drop support for GSO.
  2718. */
  2719. if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
  2720. features &= ~NETIF_F_GSO_MASK;
  2721. /* MACLEN can support at most 63 words */
  2722. len = skb_network_header(skb) - skb->data;
  2723. if (len & ~(63 * 2))
  2724. goto out_err;
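/* Each of these header checks uses the same mask trick: the hardware
 * field stores the length in 2- or 4-byte units, so len & ~(max * unit)
 * is non-zero whenever len is not a multiple of the unit or is larger
 * than the biggest encodable value.
 */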
  2725. /* IPLEN and EIPLEN can support at most 127 dwords */
  2726. len = skb_transport_header(skb) - skb_network_header(skb);
  2727. if (len & ~(127 * 4))
  2728. goto out_err;
  2729. if (skb->encapsulation) {
  2730. /* L4TUNLEN can support 127 words */
  2731. len = skb_inner_network_header(skb) - skb_transport_header(skb);
  2732. if (len & ~(127 * 2))
  2733. goto out_err;
  2734. /* IPLEN can support at most 127 dwords */
  2735. len = skb_inner_transport_header(skb) -
  2736. skb_inner_network_header(skb);
  2737. if (len & ~(127 * 4))
  2738. goto out_err;
  2739. }
  2740. /* No need to validate L4LEN as TCP is the only protocol with a
2741. * flexible value and we support all possible values supported
  2742. * by TCP, which is at most 15 dwords
  2743. */
  2744. return features;
  2745. out_err:
  2746. return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  2747. }
  2748. /**
  2749. * i40evf_fix_features - fix up the netdev feature bits
  2750. * @netdev: our net device
  2751. * @features: desired feature bits
  2752. *
  2753. * Returns fixed-up features bits
  2754. **/
  2755. static netdev_features_t i40evf_fix_features(struct net_device *netdev,
  2756. netdev_features_t features)
  2757. {
  2758. struct i40evf_adapter *adapter = netdev_priv(netdev);
  2759. if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
  2760. features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
  2761. NETIF_F_HW_VLAN_CTAG_RX |
  2762. NETIF_F_HW_VLAN_CTAG_FILTER);
  2763. return features;
  2764. }
  2765. static const struct net_device_ops i40evf_netdev_ops = {
  2766. .ndo_open = i40evf_open,
  2767. .ndo_stop = i40evf_close,
  2768. .ndo_start_xmit = i40evf_xmit_frame,
  2769. .ndo_set_rx_mode = i40evf_set_rx_mode,
  2770. .ndo_validate_addr = eth_validate_addr,
  2771. .ndo_set_mac_address = i40evf_set_mac,
  2772. .ndo_change_mtu = i40evf_change_mtu,
  2773. .ndo_tx_timeout = i40evf_tx_timeout,
  2774. .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
  2775. .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
  2776. .ndo_features_check = i40evf_features_check,
  2777. .ndo_fix_features = i40evf_fix_features,
  2778. .ndo_set_features = i40evf_set_features,
  2779. .ndo_setup_tc = i40evf_setup_tc,
  2780. };
  2781. /**
  2782. * i40evf_check_reset_complete - check that VF reset is complete
  2783. * @hw: pointer to hw struct
  2784. *
  2785. * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
  2786. **/
  2787. static int i40evf_check_reset_complete(struct i40e_hw *hw)
  2788. {
  2789. u32 rstat;
  2790. int i;
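/* Poll roughly every 10-20 us, up to 100 times, giving the PF on the
 * order of 1-2 ms to report that the VF reset has completed.
 */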
  2791. for (i = 0; i < 100; i++) {
  2792. rstat = rd32(hw, I40E_VFGEN_RSTAT) &
  2793. I40E_VFGEN_RSTAT_VFR_STATE_MASK;
  2794. if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
  2795. (rstat == VIRTCHNL_VFR_COMPLETED))
  2796. return 0;
  2797. usleep_range(10, 20);
  2798. }
  2799. return -EBUSY;
  2800. }
  2801. /**
  2802. * i40evf_process_config - Process the config information we got from the PF
  2803. * @adapter: board private structure
  2804. *
  2805. * Verify that we have a valid config struct, and set up our netdev features
  2806. * and our VSI struct.
  2807. **/
  2808. int i40evf_process_config(struct i40evf_adapter *adapter)
  2809. {
  2810. struct virtchnl_vf_resource *vfres = adapter->vf_res;
  2811. int i, num_req_queues = adapter->num_req_queues;
  2812. struct net_device *netdev = adapter->netdev;
  2813. struct i40e_vsi *vsi = &adapter->vsi;
  2814. netdev_features_t hw_enc_features;
  2815. netdev_features_t hw_features;
  2816. /* got VF config message back from PF, now we can parse it */
  2817. for (i = 0; i < vfres->num_vsis; i++) {
  2818. if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
  2819. adapter->vsi_res = &vfres->vsi_res[i];
  2820. }
  2821. if (!adapter->vsi_res) {
  2822. dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
  2823. return -ENODEV;
  2824. }
  2825. if (num_req_queues &&
  2826. num_req_queues != adapter->vsi_res->num_queue_pairs) {
2827. /* Problem: the PF gave us a different number of queues than we
2828. * requested. Need a reset to see if we can get back to a working
2829. * state.
  2830. */
  2831. dev_err(&adapter->pdev->dev,
  2832. "Requested %d queues, but PF only gave us %d.\n",
  2833. num_req_queues,
  2834. adapter->vsi_res->num_queue_pairs);
  2835. adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
  2836. adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
  2837. i40evf_schedule_reset(adapter);
  2838. return -ENODEV;
  2839. }
  2840. adapter->num_req_queues = 0;
  2841. hw_enc_features = NETIF_F_SG |
  2842. NETIF_F_IP_CSUM |
  2843. NETIF_F_IPV6_CSUM |
  2844. NETIF_F_HIGHDMA |
  2845. NETIF_F_SOFT_FEATURES |
  2846. NETIF_F_TSO |
  2847. NETIF_F_TSO_ECN |
  2848. NETIF_F_TSO6 |
  2849. NETIF_F_SCTP_CRC |
  2850. NETIF_F_RXHASH |
  2851. NETIF_F_RXCSUM |
  2852. 0;
2853. /* advertise to the stack only if offloads for encapsulated packets
2854. * are supported
  2855. */
  2856. if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
  2857. hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
  2858. NETIF_F_GSO_GRE |
  2859. NETIF_F_GSO_GRE_CSUM |
  2860. NETIF_F_GSO_IPXIP4 |
  2861. NETIF_F_GSO_IPXIP6 |
  2862. NETIF_F_GSO_UDP_TUNNEL_CSUM |
  2863. NETIF_F_GSO_PARTIAL |
  2864. 0;
  2865. if (!(vfres->vf_cap_flags &
  2866. VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
  2867. netdev->gso_partial_features |=
  2868. NETIF_F_GSO_UDP_TUNNEL_CSUM;
  2869. netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
  2870. netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
  2871. netdev->hw_enc_features |= hw_enc_features;
  2872. }
  2873. /* record features VLANs can make use of */
  2874. netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
2875. /* Write features and hw_features separately to avoid polluting
2876. * either one with, or dropping, features that were set when we registered.
  2877. */
  2878. hw_features = hw_enc_features;
  2879. /* Enable VLAN features if supported */
  2880. if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
  2881. hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
  2882. NETIF_F_HW_VLAN_CTAG_RX);
  2883. /* Enable cloud filter if ADQ is supported */
  2884. if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
  2885. hw_features |= NETIF_F_HW_TC;
  2886. netdev->hw_features |= hw_features;
  2887. netdev->features |= hw_features;
  2888. if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
  2889. netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  2890. /* Do not turn on offloads when they are requested to be turned off.
2891. * TSO needs a minimum MTU of 576 bytes to work correctly.
  2892. */
  2893. if (netdev->wanted_features) {
  2894. if (!(netdev->wanted_features & NETIF_F_TSO) ||
  2895. netdev->mtu < 576)
  2896. netdev->features &= ~NETIF_F_TSO;
  2897. if (!(netdev->wanted_features & NETIF_F_TSO6) ||
  2898. netdev->mtu < 576)
  2899. netdev->features &= ~NETIF_F_TSO6;
  2900. if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
  2901. netdev->features &= ~NETIF_F_TSO_ECN;
  2902. if (!(netdev->wanted_features & NETIF_F_GRO))
  2903. netdev->features &= ~NETIF_F_GRO;
  2904. if (!(netdev->wanted_features & NETIF_F_GSO))
  2905. netdev->features &= ~NETIF_F_GSO;
  2906. }
  2907. adapter->vsi.id = adapter->vsi_res->vsi_id;
  2908. adapter->vsi.back = adapter;
  2909. adapter->vsi.base_vector = 1;
  2910. adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
  2911. vsi->netdev = adapter->netdev;
  2912. vsi->qs_handle = adapter->vsi_res->qset_handle;
  2913. if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
  2914. adapter->rss_key_size = vfres->rss_key_size;
  2915. adapter->rss_lut_size = vfres->rss_lut_size;
  2916. } else {
  2917. adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
  2918. adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
  2919. }
  2920. return 0;
  2921. }
  2922. /**
  2923. * i40evf_init_task - worker thread to perform delayed initialization
  2924. * @work: pointer to work_struct containing our data
  2925. *
  2926. * This task completes the work that was begun in probe. Due to the nature
  2927. * of VF-PF communications, we may need to wait tens of milliseconds to get
  2928. * responses back from the PF. Rather than busy-wait in probe and bog down the
  2929. * whole system, we'll do it in a task so we can sleep.
  2930. * This task only runs during driver init. Once we've established
  2931. * communications with the PF driver and set up our netdev, the watchdog
  2932. * takes over.
  2933. **/
  2934. static void i40evf_init_task(struct work_struct *work)
  2935. {
  2936. struct i40evf_adapter *adapter = container_of(work,
  2937. struct i40evf_adapter,
  2938. init_task.work);
  2939. struct net_device *netdev = adapter->netdev;
  2940. struct i40e_hw *hw = &adapter->hw;
  2941. struct pci_dev *pdev = adapter->pdev;
  2942. int err, bufsz;
  2943. switch (adapter->state) {
  2944. case __I40EVF_STARTUP:
  2945. /* driver loaded, probe complete */
  2946. adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
  2947. adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
  2948. err = i40e_set_mac_type(hw);
  2949. if (err) {
  2950. dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
  2951. err);
  2952. goto err;
  2953. }
  2954. err = i40evf_check_reset_complete(hw);
  2955. if (err) {
  2956. dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
  2957. err);
  2958. goto err;
  2959. }
  2960. hw->aq.num_arq_entries = I40EVF_AQ_LEN;
  2961. hw->aq.num_asq_entries = I40EVF_AQ_LEN;
  2962. hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
  2963. hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
  2964. err = i40evf_init_adminq(hw);
  2965. if (err) {
  2966. dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
  2967. err);
  2968. goto err;
  2969. }
  2970. err = i40evf_send_api_ver(adapter);
  2971. if (err) {
  2972. dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
  2973. i40evf_shutdown_adminq(hw);
  2974. goto err;
  2975. }
  2976. adapter->state = __I40EVF_INIT_VERSION_CHECK;
  2977. goto restart;
  2978. case __I40EVF_INIT_VERSION_CHECK:
  2979. if (!i40evf_asq_done(hw)) {
  2980. dev_err(&pdev->dev, "Admin queue command never completed\n");
  2981. i40evf_shutdown_adminq(hw);
  2982. adapter->state = __I40EVF_STARTUP;
  2983. goto err;
  2984. }
  2985. /* aq msg sent, awaiting reply */
  2986. err = i40evf_verify_api_ver(adapter);
  2987. if (err) {
  2988. if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
  2989. err = i40evf_send_api_ver(adapter);
  2990. else
  2991. dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
  2992. adapter->pf_version.major,
  2993. adapter->pf_version.minor,
  2994. VIRTCHNL_VERSION_MAJOR,
  2995. VIRTCHNL_VERSION_MINOR);
  2996. goto err;
  2997. }
  2998. err = i40evf_send_vf_config_msg(adapter);
  2999. if (err) {
  3000. dev_err(&pdev->dev, "Unable to send config request (%d)\n",
  3001. err);
  3002. goto err;
  3003. }
  3004. adapter->state = __I40EVF_INIT_GET_RESOURCES;
  3005. goto restart;
  3006. case __I40EVF_INIT_GET_RESOURCES:
  3007. /* aq msg sent, awaiting reply */
  3008. if (!adapter->vf_res) {
  3009. bufsz = sizeof(struct virtchnl_vf_resource) +
  3010. (I40E_MAX_VF_VSI *
  3011. sizeof(struct virtchnl_vsi_resource));
  3012. adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
  3013. if (!adapter->vf_res)
  3014. goto err;
  3015. }
  3016. err = i40evf_get_vf_config(adapter);
  3017. if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
  3018. err = i40evf_send_vf_config_msg(adapter);
  3019. goto err;
  3020. } else if (err == I40E_ERR_PARAM) {
  3021. /* We only get ERR_PARAM if the device is in a very bad
  3022. * state or if we've been disabled for previous bad
  3023. * behavior. Either way, we're done now.
  3024. */
  3025. i40evf_shutdown_adminq(hw);
  3026. dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
  3027. return;
  3028. }
  3029. if (err) {
  3030. dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
  3031. err);
  3032. goto err_alloc;
  3033. }
  3034. adapter->state = __I40EVF_INIT_SW;
  3035. break;
  3036. default:
  3037. goto err_alloc;
  3038. }
  3039. if (i40evf_process_config(adapter))
  3040. goto err_alloc;
  3041. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  3042. adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
  3043. netdev->netdev_ops = &i40evf_netdev_ops;
  3044. i40evf_set_ethtool_ops(netdev);
  3045. netdev->watchdog_timeo = 5 * HZ;
  3046. /* MTU range: 68 - 9710 */
  3047. netdev->min_mtu = ETH_MIN_MTU;
  3048. netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
  3049. if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
  3050. dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
  3051. adapter->hw.mac.addr);
  3052. eth_hw_addr_random(netdev);
  3053. ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
  3054. } else {
  3055. adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
  3056. ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
  3057. ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
  3058. }
  3059. timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
  3060. mod_timer(&adapter->watchdog_timer, jiffies + 1);
  3061. adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
  3062. adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
  3063. err = i40evf_init_interrupt_scheme(adapter);
  3064. if (err)
  3065. goto err_sw_init;
  3066. i40evf_map_rings_to_vectors(adapter);
  3067. if (adapter->vf_res->vf_cap_flags &
  3068. VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
  3069. adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
  3070. err = i40evf_request_misc_irq(adapter);
  3071. if (err)
  3072. goto err_sw_init;
  3073. netif_carrier_off(netdev);
  3074. adapter->link_up = false;
  3075. if (!adapter->netdev_registered) {
  3076. err = register_netdev(netdev);
  3077. if (err)
  3078. goto err_register;
  3079. }
  3080. adapter->netdev_registered = true;
  3081. netif_tx_stop_all_queues(netdev);
  3082. if (CLIENT_ALLOWED(adapter)) {
  3083. err = i40evf_lan_add_device(adapter);
  3084. if (err)
  3085. dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
  3086. err);
  3087. }
  3088. dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
  3089. if (netdev->features & NETIF_F_GRO)
  3090. dev_info(&pdev->dev, "GRO is enabled\n");
  3091. adapter->state = __I40EVF_DOWN;
  3092. set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
  3093. i40evf_misc_irq_enable(adapter);
  3094. wake_up(&adapter->down_waitqueue);
  3095. adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
  3096. adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
  3097. if (!adapter->rss_key || !adapter->rss_lut)
  3098. goto err_mem;
  3099. if (RSS_AQ(adapter)) {
  3100. adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
  3101. mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
  3102. } else {
  3103. i40evf_init_rss(adapter);
  3104. }
  3105. return;
  3106. restart:
  3107. schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
  3108. return;
  3109. err_mem:
  3110. i40evf_free_rss(adapter);
  3111. err_register:
  3112. i40evf_free_misc_irq(adapter);
  3113. err_sw_init:
  3114. i40evf_reset_interrupt_capability(adapter);
  3115. err_alloc:
  3116. kfree(adapter->vf_res);
  3117. adapter->vf_res = NULL;
  3118. err:
  3119. /* Things went into the weeds, so try again later */
  3120. if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
  3121. dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
  3122. adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
  3123. i40evf_shutdown_adminq(hw);
  3124. adapter->state = __I40EVF_STARTUP;
  3125. schedule_delayed_work(&adapter->init_task, HZ * 5);
  3126. return;
  3127. }
  3128. schedule_delayed_work(&adapter->init_task, HZ);
  3129. }
  3130. /**
  3131. * i40evf_shutdown - Shutdown the device in preparation for a reboot
  3132. * @pdev: pci device structure
  3133. **/
  3134. static void i40evf_shutdown(struct pci_dev *pdev)
  3135. {
  3136. struct net_device *netdev = pci_get_drvdata(pdev);
  3137. struct i40evf_adapter *adapter = netdev_priv(netdev);
  3138. netif_device_detach(netdev);
  3139. if (netif_running(netdev))
  3140. i40evf_close(netdev);
  3141. /* Prevent the watchdog from running. */
  3142. adapter->state = __I40EVF_REMOVE;
  3143. adapter->aq_required = 0;
  3144. #ifdef CONFIG_PM
  3145. pci_save_state(pdev);
  3146. #endif
  3147. pci_disable_device(pdev);
  3148. }
  3149. /**
  3150. * i40evf_probe - Device Initialization Routine
  3151. * @pdev: PCI device information struct
  3152. * @ent: entry in i40evf_pci_tbl
  3153. *
  3154. * Returns 0 on success, negative on failure
  3155. *
  3156. * i40evf_probe initializes an adapter identified by a pci_dev structure.
  3157. * The OS initialization, configuring of the adapter private structure,
  3158. * and a hardware reset occur.
  3159. **/
  3160. static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3161. {
  3162. struct net_device *netdev;
  3163. struct i40evf_adapter *adapter = NULL;
  3164. struct i40e_hw *hw = NULL;
  3165. int err;
  3166. err = pci_enable_device(pdev);
  3167. if (err)
  3168. return err;
  3169. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  3170. if (err) {
  3171. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  3172. if (err) {
  3173. dev_err(&pdev->dev,
  3174. "DMA configuration failed: 0x%x\n", err);
  3175. goto err_dma;
  3176. }
  3177. }
  3178. err = pci_request_regions(pdev, i40evf_driver_name);
  3179. if (err) {
  3180. dev_err(&pdev->dev,
  3181. "pci_request_regions failed 0x%x\n", err);
  3182. goto err_pci_reg;
  3183. }
  3184. pci_enable_pcie_error_reporting(pdev);
  3185. pci_set_master(pdev);
  3186. netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
  3187. I40EVF_MAX_REQ_QUEUES);
  3188. if (!netdev) {
  3189. err = -ENOMEM;
  3190. goto err_alloc_etherdev;
  3191. }
  3192. SET_NETDEV_DEV(netdev, &pdev->dev);
  3193. pci_set_drvdata(pdev, netdev);
  3194. adapter = netdev_priv(netdev);
  3195. adapter->netdev = netdev;
  3196. adapter->pdev = pdev;
  3197. hw = &adapter->hw;
  3198. hw->back = adapter;
  3199. adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
  3200. adapter->state = __I40EVF_STARTUP;
  3201. /* Call save state here because it relies on the adapter struct. */
  3202. pci_save_state(pdev);
  3203. hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
  3204. pci_resource_len(pdev, 0));
  3205. if (!hw->hw_addr) {
  3206. err = -EIO;
  3207. goto err_ioremap;
  3208. }
  3209. hw->vendor_id = pdev->vendor;
  3210. hw->device_id = pdev->device;
  3211. pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  3212. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  3213. hw->subsystem_device_id = pdev->subsystem_device;
  3214. hw->bus.device = PCI_SLOT(pdev->devfn);
  3215. hw->bus.func = PCI_FUNC(pdev->devfn);
  3216. hw->bus.bus_id = pdev->bus->number;
  3217. /* set up the locks for the AQ, do this only once in probe
  3218. * and destroy them only once in remove
  3219. */
  3220. mutex_init(&hw->aq.asq_mutex);
  3221. mutex_init(&hw->aq.arq_mutex);
  3222. spin_lock_init(&adapter->mac_vlan_list_lock);
  3223. spin_lock_init(&adapter->cloud_filter_list_lock);
  3224. INIT_LIST_HEAD(&adapter->mac_filter_list);
  3225. INIT_LIST_HEAD(&adapter->vlan_filter_list);
  3226. INIT_LIST_HEAD(&adapter->cloud_filter_list);
  3227. INIT_WORK(&adapter->reset_task, i40evf_reset_task);
  3228. INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
  3229. INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
  3230. INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
  3231. INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
  3232. schedule_delayed_work(&adapter->init_task,
  3233. msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
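/* The 5 ms * (devfn & 0x07) factor staggers the first init_task run by
 * up to 35 ms across sibling VFs, so functions probed together do not
 * all hit the PF's admin queue at the same instant.
 */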
  3234. /* Setup the wait queue for indicating transition to down status */
  3235. init_waitqueue_head(&adapter->down_waitqueue);
  3236. return 0;
  3237. err_ioremap:
  3238. free_netdev(netdev);
  3239. err_alloc_etherdev:
  3240. pci_release_regions(pdev);
  3241. err_pci_reg:
  3242. err_dma:
  3243. pci_disable_device(pdev);
  3244. return err;
  3245. }
  3246. #ifdef CONFIG_PM
  3247. /**
  3248. * i40evf_suspend - Power management suspend routine
  3249. * @pdev: PCI device information struct
  3250. * @state: unused
  3251. *
  3252. * Called when the system (VM) is entering sleep/suspend.
  3253. **/
  3254. static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
  3255. {
  3256. struct net_device *netdev = pci_get_drvdata(pdev);
  3257. struct i40evf_adapter *adapter = netdev_priv(netdev);
  3258. int retval = 0;
  3259. netif_device_detach(netdev);
  3260. while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
  3261. &adapter->crit_section))
  3262. usleep_range(500, 1000);
  3263. if (netif_running(netdev)) {
  3264. rtnl_lock();
  3265. i40evf_down(adapter);
  3266. rtnl_unlock();
  3267. }
  3268. i40evf_free_misc_irq(adapter);
  3269. i40evf_reset_interrupt_capability(adapter);
  3270. clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
  3271. retval = pci_save_state(pdev);
  3272. if (retval)
  3273. return retval;
  3274. pci_disable_device(pdev);
  3275. return 0;
  3276. }
  3277. /**
  3278. * i40evf_resume - Power management resume routine
  3279. * @pdev: PCI device information struct
  3280. *
  3281. * Called when the system (VM) is resumed from sleep/suspend.
  3282. **/
  3283. static int i40evf_resume(struct pci_dev *pdev)
  3284. {
  3285. struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
  3286. struct net_device *netdev = adapter->netdev;
  3287. u32 err;
  3288. pci_set_power_state(pdev, PCI_D0);
  3289. pci_restore_state(pdev);
  3290. /* pci_restore_state clears dev->state_saved so call
  3291. * pci_save_state to restore it.
  3292. */
  3293. pci_save_state(pdev);
  3294. err = pci_enable_device_mem(pdev);
  3295. if (err) {
  3296. dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
  3297. return err;
  3298. }
  3299. pci_set_master(pdev);
  3300. rtnl_lock();
  3301. err = i40evf_set_interrupt_capability(adapter);
  3302. if (err) {
  3303. rtnl_unlock();
  3304. dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
  3305. return err;
  3306. }
  3307. err = i40evf_request_misc_irq(adapter);
  3308. rtnl_unlock();
  3309. if (err) {
  3310. dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
  3311. return err;
  3312. }
  3313. schedule_work(&adapter->reset_task);
  3314. netif_device_attach(netdev);
  3315. return err;
  3316. }
  3317. #endif /* CONFIG_PM */
  3318. /**
  3319. * i40evf_remove - Device Removal Routine
  3320. * @pdev: PCI device information struct
  3321. *
  3322. * i40evf_remove is called by the PCI subsystem to alert the driver
3323. * that it should release a PCI device. This could be caused by a
  3324. * Hot-Plug event, or because the driver is going to be removed from
  3325. * memory.
  3326. **/
  3327. static void i40evf_remove(struct pci_dev *pdev)
  3328. {
  3329. struct net_device *netdev = pci_get_drvdata(pdev);
  3330. struct i40evf_adapter *adapter = netdev_priv(netdev);
  3331. struct i40evf_vlan_filter *vlf, *vlftmp;
  3332. struct i40evf_mac_filter *f, *ftmp;
  3333. struct i40evf_cloud_filter *cf, *cftmp;
  3334. struct i40e_hw *hw = &adapter->hw;
  3335. int err;
3336. /* Indicate we are in remove so that the reset task does not run */
  3337. set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
  3338. cancel_delayed_work_sync(&adapter->init_task);
  3339. cancel_work_sync(&adapter->reset_task);
  3340. cancel_delayed_work_sync(&adapter->client_task);
  3341. if (adapter->netdev_registered) {
  3342. unregister_netdev(netdev);
  3343. adapter->netdev_registered = false;
  3344. }
  3345. if (CLIENT_ALLOWED(adapter)) {
  3346. err = i40evf_lan_del_device(adapter);
  3347. if (err)
  3348. dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
  3349. err);
  3350. }
  3351. /* Shut down all the garbage mashers on the detention level */
  3352. adapter->state = __I40EVF_REMOVE;
  3353. adapter->aq_required = 0;
  3354. adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
  3355. i40evf_request_reset(adapter);
  3356. msleep(50);
  3357. /* If the FW isn't responding, kick it once, but only once. */
  3358. if (!i40evf_asq_done(hw)) {
  3359. i40evf_request_reset(adapter);
  3360. msleep(50);
  3361. }
  3362. i40evf_free_all_tx_resources(adapter);
  3363. i40evf_free_all_rx_resources(adapter);
  3364. i40evf_misc_irq_disable(adapter);
  3365. i40evf_free_misc_irq(adapter);
  3366. i40evf_reset_interrupt_capability(adapter);
  3367. i40evf_free_q_vectors(adapter);
  3368. if (adapter->watchdog_timer.function)
  3369. del_timer_sync(&adapter->watchdog_timer);
  3370. i40evf_free_rss(adapter);
  3371. if (hw->aq.asq.count)
  3372. i40evf_shutdown_adminq(hw);
  3373. /* destroy the locks only once, here */
  3374. mutex_destroy(&hw->aq.arq_mutex);
  3375. mutex_destroy(&hw->aq.asq_mutex);
  3376. iounmap(hw->hw_addr);
  3377. pci_release_regions(pdev);
  3378. i40evf_free_all_tx_resources(adapter);
  3379. i40evf_free_all_rx_resources(adapter);
  3380. i40evf_free_queues(adapter);
  3381. kfree(adapter->vf_res);
  3382. spin_lock_bh(&adapter->mac_vlan_list_lock);
  3383. /* If we got removed before an up/down sequence, we've got a filter
  3384. * hanging out there that we need to get rid of.
  3385. */
  3386. list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
  3387. list_del(&f->list);
  3388. kfree(f);
  3389. }
  3390. list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
  3391. list) {
  3392. list_del(&vlf->list);
  3393. kfree(vlf);
  3394. }
  3395. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  3396. spin_lock_bh(&adapter->cloud_filter_list_lock);
  3397. list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
  3398. list_del(&cf->list);
  3399. kfree(cf);
  3400. }
  3401. spin_unlock_bh(&adapter->cloud_filter_list_lock);
  3402. free_netdev(netdev);
  3403. pci_disable_pcie_error_reporting(pdev);
  3404. pci_disable_device(pdev);
  3405. }
  3406. static struct pci_driver i40evf_driver = {
  3407. .name = i40evf_driver_name,
  3408. .id_table = i40evf_pci_tbl,
  3409. .probe = i40evf_probe,
  3410. .remove = i40evf_remove,
  3411. #ifdef CONFIG_PM
  3412. .suspend = i40evf_suspend,
  3413. .resume = i40evf_resume,
  3414. #endif
  3415. .shutdown = i40evf_shutdown,
  3416. };
  3417. /**
  3418. * i40e_init_module - Driver Registration Routine
  3419. *
  3420. * i40e_init_module is the first routine called when the driver is
  3421. * loaded. All it does is register with the PCI subsystem.
  3422. **/
  3423. static int __init i40evf_init_module(void)
  3424. {
  3425. int ret;
  3426. pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
  3427. i40evf_driver_version);
  3428. pr_info("%s\n", i40evf_copyright);
  3429. i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
  3430. i40evf_driver_name);
  3431. if (!i40evf_wq) {
  3432. pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
  3433. return -ENOMEM;
  3434. }
  3435. ret = pci_register_driver(&i40evf_driver);
  3436. return ret;
  3437. }
  3438. module_init(i40evf_init_module);
  3439. /**
  3440. * i40e_exit_module - Driver Exit Cleanup Routine
  3441. *
  3442. * i40e_exit_module is called just before the driver is removed
  3443. * from memory.
  3444. **/
  3445. static void __exit i40evf_exit_module(void)
  3446. {
  3447. pci_unregister_driver(&i40evf_driver);
  3448. destroy_workqueue(i40evf_wq);
  3449. }
  3450. module_exit(i40evf_exit_module);
  3451. /* i40evf_main.c */