fm10k_pci.c

/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
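	/* A config read of all-ones often means the device has dropped off
	 * the bus; the flush below presumably forces a register read, which
	 * lets the surprise-remove detection in fm10k_read_reg() detach the
	 * device if the register space is also gone.
	 */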
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
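	/* An all-ones value can be a legitimate register read, so before
	 * detaching confirm removal by also checking register 0, which is
	 * not expected to read as all-ones while the device is present.
	 */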
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);

		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs.
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}

/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Make sure we waited until any current invocations have stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit
	 * gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}

/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
		clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
		queue_work(fm10k_workqueue, &interface->service_task);
	} else {
		set_bit(__FM10K_SERVICE_REQUEST, interface->state);
	}
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));
	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);

	/* If a service event was requested since we started, immediately
	 * re-schedule now. This ensures we don't drop a request until the
	 * next timer event.
	 */
	if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
		fm10k_service_event_schedule(interface);
}

static void fm10k_stop_service_event(struct fm10k_intfc *interface)
{
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);
	cancel_work_sync(&interface->service_task);

	/* It's possible that cancel_work_sync stopped the service task from
	 * running before it could actually start. In this case the
	 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
	 * the service task cannot be running at this point, we need to clear
	 * the scheduled bit, as otherwise the service task may never be
	 * restarted.
	 */
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}

static void fm10k_start_service_event(struct fm10k_intfc *interface)
{
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @t: pointer to the service_timer embedded in the interface structure
 **/
static void fm10k_service_timer(struct timer_list *t)
{
	struct fm10k_intfc *interface = from_timer(interface, t,
						   service_timer);

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
 * @interface: fm10k private data structure
 *
 * This function prepares for a device reset by shutting as much down as we
 * can. It does nothing and returns false if __FM10K_RESETTING was already set
 * prior to calling this function. It returns true if it actually did work.
 */
static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	/* Nothing to do if a reset is already in progress */
	if (test_and_set_bit(__FM10K_RESETTING, interface->state))
		return false;

	/* As the MAC/VLAN task will be accessing registers it must not be
	 * running while we reset. Although the task will not be scheduled
	 * once we start resetting it may already be running
	 */
	fm10k_stop_macvlan_task(interface);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();

	return true;
}

static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));

	rtnl_lock();

	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	fm10k_resume_macvlan_task(interface);

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	u32 __iomem *hw_addr;
	u32 value;
	int err;

	/* do nothing if netdev is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	/* We've lost the PCIe register space, and can no longer access the
	 * device. Shut everything except the detach subtask down and prepare
	 * to reset the device in case we recover. If we actually prepare for
	 * reset, indicate that we're detached.
	 */
	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_DETACHED, interface->state);

	/* check the real address space to see if we've recovered */
	hw_addr = READ_ONCE(interface->uc_addr);
	value = readl(hw_addr);
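	/* readl() returns all-ones while the register space is inaccessible,
	 * so any other value means PCIe access has been restored.
	 */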
	if (~value) {
		/* Make sure the reset was initiated because we detached,
		 * otherwise we might race with a different reset flow.
		 */
		if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
					interface->state))
			return;

		/* Restore the hardware address */
		interface->hw.hw_addr = interface->uc_addr;

		/* PCIe link has been restored, and the device is active
		 * again. Restore everything and reset the device.
		 */
		err = fm10k_handle_reset(interface);
		if (err) {
			netdev_err(netdev, "Unable to reset device: %d\n", err);
			interface->hw.hw_addr = NULL;
			return;
		}

		/* Re-attach the netdev */
		netif_device_attach(netdev);
		netdev_warn(netdev, "PCIe link restored, device now attached\n");
		return;
	}
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	int err;

	if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
				interface->flags))
		return;

	/* If another thread has already prepared to reset the device, we
	 * should not attempt to handle a reset here, since we'd race with
	 * that thread. This may happen if we suspend the device or if the
	 * PCIe link is lost. In this case, we'll just ignore the RESET
	 * request, as it will (eventually) be taken care of when the thread
	 * which actually started the reset is finished.
	 */
	if (!fm10k_prepare_for_reset(interface))
		return;

	netdev_err(interface->netdev, "Reset interface\n");

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
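	/* READ_ONCE() snapshots each ring pointer once, guarding against the
	 * ring array being updated by a concurrent queue reconfiguration
	 * while stats are being gathered; the NULL check handles rings that
	 * are gone.
	 */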
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* Check whether we're detached first */
	fm10k_detach_subtask(interface);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock, flags);
			break;
		}

		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);

	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
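/* Expands into a switch case which stringifies the matched fault type, e.g.
 * FM10K_ERR_MSG(PCA_NO_FAULT); becomes:
 *	case (PCA_NO_FAULT): error = "PCA_NO_FAULT"; break;
 */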
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;
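	/* Walk the fault bits in EICR one at a time; each bit maps to a
	 * fault register block, so the fault type advances by
	 * FM10K_FAULT_SIZE for every bit shifted off.
	 */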
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
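	/* Scan all 256 queues from highest to lowest, one 32-queue MAXHOLDQ
	 * bitmap at a time. BIT(31) tracks the current queue, the multiply
	 * shifts the next queue's bit into that position, and once a bitmap
	 * is exhausted we skip down to the next 32-queue boundary.
	 */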
  1145. for (q = 255;;) {
  1146. if (maxholdq & BIT(31)) {
  1147. if (q < FM10K_MAX_QUEUES_PF) {
  1148. interface->rx_overrun_pf++;
  1149. fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
  1150. } else {
  1151. interface->rx_overrun_vf++;
  1152. }
  1153. }
  1154. maxholdq *= 2;
  1155. if (!maxholdq)
  1156. q &= ~(32 - 1);
  1157. if (!q)
  1158. break;
  1159. if (q-- % 32)
  1160. continue;
  1161. maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
  1162. if (maxholdq)
  1163. fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
  1164. }
  1165. }

static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;
	s32 err = 0;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		err = mbx->ops.process(hw, mbx);
		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	if (err == FM10K_ERR_RESET_REQUESTED)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	return IRQ_HANDLED;
}
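
/* Editor's note (illustrative, not from the original source): the final ITR
 * write above re-arms the mailbox vector with FM10K_MBX_INT_DELAY (the
 * nominal 20us noted in the comment) shifted down by hw->mac.itr_scale,
 * which compensates for the ITR tick period varying with PCIe link speed.
 * With itr_scale == 0 this degenerates to:
 *
 *	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
 *			FM10K_MBX_INT_DELAY | FM10K_ITR_ENABLE);
 */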

void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}

static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
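
/* Editor's note (illustrative, not from the original source): each entry in
 * a fm10k_msg_data table binds a TLV message ID to a handler, and the
 * mailbox process routine dispatches incoming messages by that ID. The
 * trailing FM10K_TLV_MSG_ERROR_HANDLER entry catches IDs with no matching
 * handler, which is why fm10k_mbx_error() logs "Unknown message ID".
 */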

static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	hw->mac.default_vid = pvid;

	return 0;
}
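
/* Editor's note (illustrative, not from the original source): the
 * UPDATE_PVID message packs both the glort and the port VLAN ID into one
 * 32-bit attribute, and FM10K_MSG_HDR_FIELD_GET() extracts each field with
 * the shift/mask described by the corresponding _SHIFT/_SIZE definitions.
 * A sketch of the idea, with made-up field positions:
 *
 *	glort = (pvid_update >> 0) & 0xFFFF;
 *	pvid  = (pvid_update >> 16) & 0xFFF;
 */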

static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf)
		err = fm10k_mbx_request_irq_pf(interface);
	else
		err = fm10k_mbx_request_irq_vf(interface);
	if (err)
		return err;

	/* connect mailbox */
	err = hw->mbx.ops.connect(hw, &hw->mbx);

	/* if the mailbox failed to connect, then free IRQ */
	if (err)
		fm10k_mbx_free_irq(interface);

	return err;
}
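
/* Editor's note (illustrative, not from the original source): the ordering
 * in fm10k_mbx_request_irq() matters. Handlers and the IRQ are put in place
 * before hw->mbx.ops.connect() since the peer may respond as soon as the
 * connect request is posted; conversely, a failed connect unwinds through
 * fm10k_mbx_free_irq() so the vector is never left requested without a
 * usable mailbox.
 */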

/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}

/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}
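
/* Editor's note (illustrative, not from the original source): a vector that
 * services both a Tx and an Rx ring is named "<dev>-TxRx-<n>" and consumes
 * one index from each counter, so a four-vector combined layout on "eth0"
 * would appear in /proc/interrupts as eth0-TxRx-0 through eth0-TxRx-3.
 */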

void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}

static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for TX DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until we run out of retries.
	 * If we fail to clear within the retry loop, we will issue a warning
	 * indicating that Tx DMA is probably hung. Note this means we call
	 * .stop_hw() twice but this shouldn't cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);

skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"due to pending requests hw was not shut down gracefully\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}
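
/* Editor's note (illustrative, not from the original source): with
 * TX_DMA_DRAIN_RETRIES == 25 and a 10-20ms sleep per attempt, the drain
 * loop in fm10k_down() waits roughly 250-500ms in the worst case before
 * declaring Tx DMA hung.
 */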

/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address, defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* initialize udp port lists */
	INIT_LIST_HEAD(&interface->vxlan_port);
	INIT_LIST_HEAD(&interface->geneve_port);

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}

static void fm10k_slot_warn(struct fm10k_intfc *interface)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	struct fm10k_hw *hw = &interface->hw;
	int max_gts = 0, expected_gts = 0;

	if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(&interface->pdev->dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding has less than 2% impact on throughput */
		max_gts = 8 * width;
		break;
	default:
		dev_warn(&interface->pdev->dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(&interface->pdev->dev,
		 "PCI Express bandwidth of %dGT/s available\n",
		 max_gts);
	dev_info(&interface->pdev->dev,
		 "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
		 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		  speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		  speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		  "Unknown"),
		 hw->bus.width,
		 (speed == PCIE_SPEED_2_5GT ? "20%" :
		  speed == PCIE_SPEED_5_0GT ? "20%" :
		  speed == PCIE_SPEED_8_0GT ? "<2%" :
		  "Unknown"),
		 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
		  hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
		  hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
		  "Unknown"));

	switch (hw->bus_caps.speed) {
	case fm10k_bus_speed_2500:
		/* 8b/10b encoding reduces max throughput by 20% */
		expected_gts = 2 * hw->bus_caps.width;
		break;
	case fm10k_bus_speed_5000:
		/* 8b/10b encoding reduces max throughput by 20% */
		expected_gts = 4 * hw->bus_caps.width;
		break;
	case fm10k_bus_speed_8000:
		/* 128b/130b encoding has less than 2% impact on throughput */
		expected_gts = 8 * hw->bus_caps.width;
		break;
	default:
		dev_warn(&interface->pdev->dev,
			 "Unable to determine expected PCI Express bandwidth.\n");
		return;
	}

	if (max_gts >= expected_gts)
		return;

	dev_warn(&interface->pdev->dev,
		 "This device requires %dGT/s of bandwidth for optimal performance.\n",
		 expected_gts);
	dev_warn(&interface->pdev->dev,
		 "A %sslot with x%d lanes is suggested.\n",
		 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
		  hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
		  hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
		 hw->bus_caps.width);
}
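
/* Editor's note (illustrative, not from the original source): max_gts
 * approximates usable bandwidth after line encoding. An 8.0GT/s x4 link
 * yields max_gts = 8 * 4 = 32 (128b/130b overhead is under 2%), while a
 * 2.5GT/s x8 link yields 2 * 8 = 16 since 8b/10b encoding costs 20%
 * (2.5 * 0.8 = 2). If the device expects an 8.0GT/s x8 slot
 * (expected_gts = 8 * 8 = 64), both examples would trigger the warning.
 */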

/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize service timer and service task late in order to avoid
	 * cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	fm10k_slot_warn(interface);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
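
/* Editor's note (illustrative, not from the original source): probe tries a
 * 48-bit DMA mask first and only falls back to 32-bit, presumably
 * reflecting the device's addressing limit, and it keeps
 * __FM10K_SERVICE_DISABLE set until the service timer and work items are
 * initialized so the mailbox IRQ requested earlier cannot schedule work
 * that does not exist yet.
 */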

/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}

static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyway.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}

#ifdef CONFIG_PM
/**
 * fm10k_resume - Generic PM resume hook
 * @dev: generic device structure
 *
 * Generic PM hook used when waking the device from a low power state after
 * suspend or hibernation. This function does not need to handle lower PCIe
 * device state as the stack takes care of that for us.
 **/
static int fm10k_resume(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* refresh hw_addr in case it was dropped */
	hw->hw_addr = interface->uc_addr;

	err = fm10k_handle_resume(interface);
	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}

/**
 * fm10k_suspend - Generic PM suspend hook
 * @dev: generic device structure
 *
 * Generic PM hook used when setting the device into a low power state for
 * system suspend or hibernation. This function does not need to handle lower
 * PCIe device state as the stack takes care of that for us.
 **/
static int fm10k_suspend(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	fm10k_prepare_suspend(interface);

	return 0;
}
#endif /* CONFIG_PM */

/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * fm10k_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After second error pci->state_saved is false, this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void fm10k_io_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err;

	err = fm10k_handle_resume(interface);

	if (err)
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
	else
		netif_device_attach(netdev);
}

/**
 * fm10k_io_reset_prepare - called when PCI function is about to be reset
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the PCI function is about to be reset,
 * allowing the device driver to prepare for it.
 */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
	/* warn in case we have any active VF devices */
	if (pci_num_vf(pdev))
		dev_warn(&pdev->dev,
			 "PCIe FLR may cause issues for any active VF devices\n");

	fm10k_prepare_suspend(pci_get_drvdata(pdev));
}

/**
 * fm10k_io_reset_done - called when PCI function has finished resetting
 * @pdev: Pointer to PCI device
 *
 * This callback is called just after the PCI function is reset, such as via
 * /sys/class/net/<enpX>/device/reset or similar.
 */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	int err = fm10k_handle_resume(interface);

	if (err) {
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
		netif_device_detach(interface->netdev);
	}
}

static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};

static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm		= &fm10k_pm_ops,
	},
#endif /* CONFIG_PM */
	.sriov_configure	= fm10k_iov_configure,
	.err_handler		= &fm10k_err_handler
};

/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}