// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/*
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
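	/* A config space read of all 1s may mean the device has fallen off
	 * the bus; flushing forces a register read through fm10k_read_reg(),
	 * which performs the surprise-removal check.
	 */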
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
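	/* An all-1s read may mean the PCIe link was lost. Unless register 0
	 * is what we just read, confirm against register 0 before poisoning
	 * hw_addr and detaching the netdev.
	 */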
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);
		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs.
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}

/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Make sure we waited until any current invocations have stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}

/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
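	/* If the service task is disabled or already queued, record the
	 * request so fm10k_service_event_complete() can reschedule it once
	 * the current run finishes.
	 */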
	if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
		clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
		queue_work(fm10k_workqueue, &interface->service_task);
	} else {
		set_bit(__FM10K_SERVICE_REQUEST, interface->state);
	}
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);

	/* If a service event was requested since we started, immediately
	 * re-schedule now. This ensures we don't drop a request until the
	 * next timer event.
	 */
	if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
		fm10k_service_event_schedule(interface);
}

static void fm10k_stop_service_event(struct fm10k_intfc *interface)
{
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);
	cancel_work_sync(&interface->service_task);

	/* It's possible that cancel_work_sync stopped the service task from
	 * running before it could actually start. In this case the
	 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
	 * the service task cannot be running at this point, we need to clear
	 * the scheduled bit, as otherwise the service task may never be
	 * restarted.
	 */
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}

static void fm10k_start_service_event(struct fm10k_intfc *interface)
{
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @t: pointer to timer data
 **/
static void fm10k_service_timer(struct timer_list *t)
{
	struct fm10k_intfc *interface = from_timer(interface, t,
						   service_timer);

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
 * @interface: fm10k private data structure
 *
 * This function prepares for a device reset by shutting as much down as we
 * can. It does nothing and returns false if __FM10K_RESETTING was already set
 * prior to calling this function. It returns true if it actually did work.
 */
static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	/* Nothing to do if a reset is already in progress */
	if (test_and_set_bit(__FM10K_RESETTING, interface->state))
		return false;

	/* As the MAC/VLAN task will be accessing registers it must not be
	 * running while we reset. Although the task will not be scheduled
	 * once we start resetting, it may already be running.
	 */
	fm10k_stop_macvlan_task(interface);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();

	return true;
}

static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));

	rtnl_lock();

	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	fm10k_resume_macvlan_task(interface);

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	u32 __iomem *hw_addr;
	u32 value;
	int err;

	/* do nothing if netdev is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	/* We've lost the PCIe register space, and can no longer access the
	 * device. Shut everything except the detach subtask down and prepare
	 * to reset the device in case we recover. If we actually prepare for
	 * reset, indicate that we're detached.
	 */
	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_DETACHED, interface->state);

	/* check the real address space to see if we've recovered */
	hw_addr = READ_ONCE(interface->uc_addr);
	value = readl(hw_addr);
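	/* anything other than an all-1s read means register space is back */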
	if (~value) {
		/* Make sure the reset was initiated because we detached,
		 * otherwise we might race with a different reset flow.
		 */
		if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
					interface->state))
			return;

		/* Restore the hardware address */
		interface->hw.hw_addr = interface->uc_addr;

		/* PCIe link has been restored, and the device is active
		 * again. Restore everything and reset the device.
		 */
		err = fm10k_handle_reset(interface);
		if (err) {
			netdev_err(netdev, "Unable to reset device: %d\n", err);
			interface->hw.hw_addr = NULL;
			return;
		}

		/* Re-attach the netdev */
		netif_device_attach(netdev);
		netdev_warn(netdev, "PCIe link restored, device now attached\n");
		return;
	}
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	int err;

	if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
				interface->flags))
		return;

	/* If another thread has already prepared to reset the device, we
	 * should not attempt to handle a reset here, since we'd race with
	 * that thread. This may happen if we suspend the device or if the
	 * PCIe link is lost. In this case, we'll just ignore the RESET
	 * request, as it will (eventually) be taken care of when the thread
	 * which actually started the reset is finished.
	 */
	if (!fm10k_prepare_for_reset(interface))
		return;

	netdev_err(interface->netdev, "Reset interface\n");

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* Check whether we're detached first */
	fm10k_detach_subtask(interface);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock, flags);
			break;
		}

		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
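/* Expand to a switch case that maps a fault enum value to its printable
 * name via the preprocessor's stringification operator.
 */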
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;
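
	/* The fault cause bits in EICR are contiguous. Walk them from the
	 * lowest bit up, advancing the fault register offset by
	 * FM10K_FAULT_SIZE each step so the PCA, THI and FUM fault blocks
	 * are queried in turn.
	 */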
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
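
	/* Each MAXHOLDQ register tracks a bank of 32 queues, with bit 31
	 * mapping to the highest-numbered queue in the bank. Walk from queue
	 * 255 down to 0, shifting the bank left each step so the current
	 * queue's bit is always bit 31; rewrite RXDCTL for any PF queue that
	 * hit its max hold time and count overruns for PF and VF queues.
	 */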
	for (q = 255;;) {
		if (maxholdq & BIT(31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;
	s32 err = 0;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		err = mbx->ops.process(hw, mbx);
		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	if (err == FM10K_ERR_RESET_REQUESTED)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	return IRQ_HANDLED;
}
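
/**
 * fm10k_mbx_free_irq - disconnect the mailbox and release its IRQ
 * @interface: board private structure
 *
 * Disconnect the mailbox, mask the related interrupt causes, and free
 * the MSI-X vector used to service the mailbox. Does nothing if MSI-X
 * was never enabled.
 **/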
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}
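
/* MAC/VLAN message handler for the VF mailbox; requests a reset if the
 * permanent MAC address, the VLAN override, or the default VLAN changed
 */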
static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}
static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
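
/**
 * fm10k_mbx_request_irq_vf - request the mailbox IRQ for a VF
 * @interface: board private structure
 *
 * Register the VF mailbox message handlers, request the mailbox MSI-X
 * vector, and map and enable the mailbox interrupt.
 **/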
static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
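
/* LPORT_MAP message handler for the PF mailbox; on a switch error this
 * forces link down and logs a warning, otherwise it records the new
 * port mapping and requests a reset if the mapping changed
 */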
static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}
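
/* UPDATE_PVID message handler; validates the glort and VLAN ID, lets the
 * IOV code handle VF ports, and requests a reset if the PF default VLAN
 * changed
 */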
static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	hw->mac.default_vid = pvid;

	return 0;
}
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
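
/**
 * fm10k_mbx_request_irq_pf - request the mailbox IRQ for the PF
 * @interface: board private structure
 *
 * Register the PF mailbox message handlers, request the mailbox MSI-X
 * vector, map the remaining interrupt causes onto that vector, and
 * enable them.
 **/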
static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
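
/**
 * fm10k_mbx_request_irq - request and connect the mailbox interrupt
 * @interface: board private structure
 *
 * Request the mailbox IRQ for either the PF or the VF and then connect
 * the mailbox; the IRQ is freed again if the connect fails.
 **/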
int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf)
		err = fm10k_mbx_request_irq_pf(interface);
	else
		err = fm10k_mbx_request_irq_vf(interface);
	if (err)
		return err;

	/* connect mailbox */
	err = hw->mbx.ops.connect(hw, &hw->mbx);

	/* if the mailbox failed to connect, then free IRQ */
	if (err)
		fm10k_mbx_free_irq(interface);

	return err;
}
/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}
/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}
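
/**
 * fm10k_up - bring the interface to an operational state
 * @interface: board private structure
 *
 * Start the Tx/Rx DMA engine, configure the descriptor rings and
 * interrupt moderation, re-enable NAPI polling, statistics, and
 * transmits, and kick off the service timer.
 **/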
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}
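
/* disable NAPI polling on all queue vectors */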
static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
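
/**
 * fm10k_down - quiesce the interface
 * @interface: board private structure
 *
 * Stop transmits, Rx filtering, NAPI polling, and statistics updates,
 * wait for any pending Tx DMA to drain, shut down the DMA engine, and
 * free any buffers still on the rings.
 **/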
void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for TX DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until a number of retries. If
	 * we fail to clear within the retry loop, we will issue a warning
	 * indicating that Tx DMA is probably hung. Note this means we call
	 * .stop_hw() twice but this shouldn't cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);

skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"due to pending requests hw was not shut down gracefully\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}
/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");

		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* initialize udp port lists */
	INIT_LIST_HEAD(&interface->vxlan_port);
	INIT_LIST_HEAD(&interface->geneve_port);

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}
/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize service timer and service task late in order to avoid
	 * cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	pcie_print_link_status(interface->pdev);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
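
/**
 * fm10k_prepare_suspend - stop software activity ahead of suspend or reset
 * @interface: board private structure
 *
 * Stop the service task so the watchdog does not touch registers while
 * the device may be disabled, then prepare the device for reset. The
 * __FM10K_RESET_SUSPENDED bit records that the reset was started here.
 **/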
static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}
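
/**
 * fm10k_handle_resume - restore software state after suspend or reset
 * @interface: board private structure
 *
 * Rebind hardware statistics, redo the reset handling, hold link down
 * briefly to prevent link flutter, and restart the service and MAC/VLAN
 * tasks.
 **/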
static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyway.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}
/**
 * fm10k_resume - Generic PM resume hook
 * @dev: generic device structure
 *
 * Generic PM hook used when waking the device from a low power state after
 * suspend or hibernation. This function does not need to handle lower PCIe
 * device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_resume(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* refresh hw_addr in case it was dropped */
	hw->hw_addr = interface->uc_addr;

	err = fm10k_handle_resume(interface);
	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}
/**
 * fm10k_suspend - Generic PM suspend hook
 * @dev: generic device structure
 *
 * Generic PM hook used when setting the device into a low power state for
 * system suspend or hibernation. This function does not need to handle lower
 * PCIe device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	fm10k_prepare_suspend(interface);

	return 0;
}
/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * fm10k_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After second error pci->state_saved is false, this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void fm10k_io_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err;

	err = fm10k_handle_resume(interface);

	if (err)
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
	else
		netif_device_attach(netdev);
}
/**
 * fm10k_io_reset_prepare - called when PCI function is about to be reset
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the PCI function is about to be reset,
 * allowing the device driver to prepare for it.
 */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
	/* warn in case we have any active VF devices */
	if (pci_num_vf(pdev))
		dev_warn(&pdev->dev,
			 "PCIe FLR may cause issues for any active VF devices\n");

	fm10k_prepare_suspend(pci_get_drvdata(pdev));
}
/**
 * fm10k_io_reset_done - called when PCI function has finished resetting
 * @pdev: Pointer to PCI device
 *
 * This callback is called just after the PCI function is reset, such as via
 * /sys/class/net/<enpX>/device/reset or similar.
 */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	int err = fm10k_handle_resume(interface);

	if (err) {
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
		netif_device_detach(interface->netdev);
	}
}
static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};

static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name = fm10k_driver_name,
	.id_table = fm10k_pci_tbl,
	.probe = fm10k_probe,
	.remove = fm10k_remove,
	.driver = {
		.pm = &fm10k_pm_ops,
	},
	.sriov_configure = fm10k_iov_configure,
	.err_handler = &fm10k_err_handler
};
/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}