fm10k_pci.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/*
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
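
	/* A config read of all ones likely means the device has dropped off
	 * the bus; the register read inside fm10k_write_flush() gives
	 * fm10k_read_reg() a chance to notice the removal and detach.
	 */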
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
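
	/* All ones can be a legitimate register value, so only treat it as a
	 * surprise removal once register 0 (or the read of register 0
	 * itself) also returns all ones.
	 */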
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);
		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs.
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}

/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Make sure we waited until any current invocations have stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}
/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
		clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
		queue_work(fm10k_workqueue, &interface->service_task);
	} else {
		set_bit(__FM10K_SERVICE_REQUEST, interface->state);
	}
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);

	/* If a service event was requested since we started, immediately
	 * re-schedule now. This ensures we don't drop a request until the
	 * next timer event.
	 */
	if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
		fm10k_service_event_schedule(interface);
}

static void fm10k_stop_service_event(struct fm10k_intfc *interface)
{
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);
	cancel_work_sync(&interface->service_task);

	/* It's possible that cancel_work_sync stopped the service task from
	 * running before it could actually start. In this case the
	 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
	 * the service task cannot be running at this point, we need to clear
	 * the scheduled bit, as otherwise the service task may never be
	 * restarted.
	 */
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}
static void fm10k_start_service_event(struct fm10k_intfc *interface)
{
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @t: pointer to timer data
 **/
static void fm10k_service_timer(struct timer_list *t)
{
	struct fm10k_intfc *interface = from_timer(interface, t,
						   service_timer);

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
 * @interface: fm10k private data structure
 *
 * This function prepares for a device reset by shutting as much down as we
 * can. It does nothing and returns false if __FM10K_RESETTING was already set
 * prior to calling this function. It returns true if it actually did work.
 */
static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	/* Nothing to do if a reset is already in progress */
	if (test_and_set_bit(__FM10K_RESETTING, interface->state))
		return false;

	/* As the MAC/VLAN task will be accessing registers it must not be
	 * running while we reset. Although the task will not be scheduled
	 * once we start resetting, it may already be running.
	 */
	fm10k_stop_macvlan_task(interface);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();

	return true;
}

static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));

	rtnl_lock();

	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	fm10k_resume_macvlan_task(interface);

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	u32 __iomem *hw_addr;
	u32 value;
	int err;

	/* do nothing if netdev is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	/* We've lost the PCIe register space, and can no longer access the
	 * device. Shut everything except the detach subtask down and prepare
	 * to reset the device in case we recover. If we actually prepare for
	 * reset, indicate that we're detached.
	 */
	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_DETACHED, interface->state);

	/* check the real address space to see if we've recovered */
	hw_addr = READ_ONCE(interface->uc_addr);
	value = readl(hw_addr);
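
	/* any zero bit in the read-back value means the BAR is readable
	 * again, i.e. the PCIe link has recovered
	 */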
	if (~value) {
		/* Make sure the reset was initiated because we detached,
		 * otherwise we might race with a different reset flow.
		 */
		if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
					interface->state))
			return;

		/* Restore the hardware address */
		interface->hw.hw_addr = interface->uc_addr;

		/* PCIe link has been restored, and the device is active
		 * again. Restore everything and reset the device.
		 */
		err = fm10k_handle_reset(interface);
		if (err) {
			netdev_err(netdev, "Unable to reset device: %d\n", err);
			interface->hw.hw_addr = NULL;
			return;
		}

		/* Re-attach the netdev */
		netif_device_attach(netdev);
		netdev_warn(netdev, "PCIe link restored, device now attached\n");
		return;
	}
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	int err;

	if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
				interface->flags))
		return;

	/* If another thread has already prepared to reset the device, we
	 * should not attempt to handle a reset here, since we'd race with
	 * that thread. This may happen if we suspend the device or if the
	 * PCIe link is lost. In this case, we'll just ignore the RESET
	 * request, as it will (eventually) be taken care of when the thread
	 * which actually started the reset is finished.
	 */
	if (!fm10k_prepare_for_reset(interface))
		return;

	netdev_err(interface->netdev, "Reset interface\n");

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}
/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}
/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* Check whether we're detached first */
	fm10k_detach_subtask(interface);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock, flags);
			break;
		}

		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}
/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}
/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);

	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}
static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}
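
	/* we should validate host state after interrupt event */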
	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;
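
	/* Walk the set fault-cause bits one at a time; the per-cause fault
	 * register blocks are laid out contiguously, FM10K_FAULT_SIZE
	 * registers apart, starting with the PCA fault block.
	 */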
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
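
	/* Each MAXHOLDQ register covers 32 queues, with the highest-numbered
	 * queue of the group in bit 31. Walk queues 255 down to 0, shifting
	 * the bitmap up in lockstep, and skip ahead to the next group once
	 * the remaining bits of the current one are all clear.
	 */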
  1128. for (q = 255;;) {
  1129. if (maxholdq & BIT(31)) {
  1130. if (q < FM10K_MAX_QUEUES_PF) {
  1131. interface->rx_overrun_pf++;
  1132. fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
  1133. } else {
  1134. interface->rx_overrun_vf++;
  1135. }
  1136. }
  1137. maxholdq *= 2;
  1138. if (!maxholdq)
  1139. q &= ~(32 - 1);
  1140. if (!q)
  1141. break;
  1142. if (q-- % 32)
  1143. continue;
  1144. maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
  1145. if (maxholdq)
  1146. fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
  1147. }
  1148. }
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;
	s32 err = 0;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		err = mbx->ops.process(hw, mbx);
		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	if (err == FM10K_ERR_RESET_REQUESTED)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	return IRQ_HANDLED;
}

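/**
 * fm10k_mbx_free_irq - free the mailbox IRQ and disable its causes
 * @interface: board private structure
 *
 * Disconnect the mailbox, mask the related interrupt causes, and release
 * the IRQ associated with the mailbox vector.
 **/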
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

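/**
 * fm10k_mbx_mac_addr - VF handler for MAC/VLAN messages from the PF
 * @hw: pointer to hardware structure
 * @results: pointer array containing parsed message data
 * @mbx: pointer to mailbox information structure
 *
 * Process the MAC/VLAN message and request a reset if the permanent MAC
 * address, VLAN override, or default VLAN has changed.
 **/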
static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}

static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

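/**
 * fm10k_mbx_request_irq_vf - request the mailbox IRQ for a VF
 * @interface: board private structure
 *
 * Register the VF mailbox message handlers, request the MSI-X vector used
 * for the mailbox, and map and enable the interrupt.
 **/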
static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

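/**
 * fm10k_lport_map - PF handler for logical port map messages
 * @hw: pointer to hardware structure
 * @results: pointer array containing parsed message data
 * @mbx: pointer to mailbox information structure
 *
 * On a switch API error, force the link down and log a warning once.
 * Otherwise update the DGLORT map and request a reset if the port count
 * changed.
 **/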
static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

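/**
 * fm10k_update_pvid - PF handler for port VLAN update messages
 * @hw: pointer to hardware structure
 * @results: pointer array containing parsed message data
 * @mbx: pointer to mailbox information structure (unused)
 *
 * Validate the GLORT and VLAN ID from the message, hand VF GLORTs off to
 * the IOV code, and otherwise update the PF default VLAN, requesting a
 * reset if it changed.
 **/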
static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	hw->mac.default_vid = pvid;

	return 0;
}

static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

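/**
 * fm10k_mbx_request_irq_pf - request the mailbox IRQ for the PF
 * @interface: board private structure
 *
 * Register the PF mailbox message handlers, request the MSI-X vector used
 * for the mailbox, map the "other" interrupt causes onto it, and enable
 * the individual causes.
 **/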
static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

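/**
 * fm10k_mbx_request_irq - request the mailbox IRQ and connect the mailbox
 * @interface: board private structure
 *
 * Request the PF or VF mailbox IRQ as appropriate and then connect the
 * mailbox, freeing the IRQ again if the connection fails.
 **/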
int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf)
		err = fm10k_mbx_request_irq_pf(interface);
	else
		err = fm10k_mbx_request_irq_vf(interface);
	if (err)
		return err;

	/* connect mailbox */
	err = hw->mbx.ops.connect(hw, &hw->mbx);

	/* if the mailbox failed to connect, then free IRQ */
	if (err)
		fm10k_mbx_free_irq(interface);

	return err;
}

/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}

/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}

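/**
 * fm10k_up - bring the interface to an operational state
 * @interface: board private structure
 *
 * Enable the Tx/Rx DMA engine, configure the descriptor rings and
 * interrupt moderation, re-establish Rx filters, restart the transmit
 * queues, and kick off the service timer.
 **/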
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}

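/**
 * fm10k_napi_disable_all - disable NAPI polling on all queue vectors
 * @interface: board private structure
 **/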
static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

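/**
 * fm10k_down - quiesce the interface and disable the hardware
 * @interface: board private structure
 *
 * Stop transmits, Rx filtering, NAPI polling, and statistics updates,
 * then wait for pending Tx DMA to drain before shutting down the DMA
 * engine and cleaning the descriptor rings.
 **/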
void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for TX DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until a number of retries. If
	 * we fail to clear within the retry loop, we will issue a warning
	 * indicating that Tx DMA is probably hung. Note this means we call
	 * .stop_hw() twice but this shouldn't cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);

skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"due to pending requests hw was not shut down gracefully\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}

/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address, defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* initialize udp port lists */
	INIT_LIST_HEAD(&interface->vxlan_port);
	INIT_LIST_HEAD(&interface->geneve_port);

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}

/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize service timer and service task late in order to avoid
	 * cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	pcie_print_link_status(interface->pdev);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

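/**
 * fm10k_prepare_suspend - stop software activity ahead of suspend or reset
 * @interface: board private structure
 *
 * Stop the service task and prepare the device for reset, noting whether
 * the device was actually shut down so that a later resume can recover.
 **/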
static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}

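/**
 * fm10k_handle_resume - bring the device back up after suspend or reset
 * @interface: board private structure
 *
 * Rebind hardware statistics, perform the reset recovery, hold the link
 * down briefly to avoid link flutter, and restart the service task and
 * MAC/VLAN request queue.
 **/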
static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyway.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}

/**
 * fm10k_resume - Generic PM resume hook
 * @dev: generic device structure
 *
 * Generic PM hook used when waking the device from a low power state after
 * suspend or hibernation. This function does not need to handle lower PCIe
 * device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_resume(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* refresh hw_addr in case it was dropped */
	hw->hw_addr = interface->uc_addr;

	err = fm10k_handle_resume(interface);
	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}

/**
 * fm10k_suspend - Generic PM suspend hook
 * @dev: generic device structure
 *
 * Generic PM hook used when setting the device into a low power state for
 * system suspend or hibernation. This function does not need to handle lower
 * PCIe device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	fm10k_prepare_suspend(interface);

	return 0;
}

/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * fm10k_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After second error pci->state_saved is false, this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void fm10k_io_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err;

	err = fm10k_handle_resume(interface);

	if (err)
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
	else
		netif_device_attach(netdev);
}

/**
 * fm10k_io_reset_prepare - called when PCI function is about to be reset
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the PCI function is about to be reset,
 * allowing the device driver to prepare for it.
 */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
	/* warn in case we have any active VF devices */
	if (pci_num_vf(pdev))
		dev_warn(&pdev->dev,
			 "PCIe FLR may cause issues for any active VF devices\n");

	fm10k_prepare_suspend(pci_get_drvdata(pdev));
}

/**
 * fm10k_io_reset_done - called when PCI function has finished resetting
 * @pdev: Pointer to PCI device
 *
 * This callback is called just after the PCI function is reset, such as via
 * /sys/class/net/<enpX>/device/reset or similar.
 */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	int err = fm10k_handle_resume(interface);

	if (err) {
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
		netif_device_detach(interface->netdev);
	}
}

static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};

static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name = fm10k_driver_name,
	.id_table = fm10k_pci_tbl,
	.probe = fm10k_probe,
	.remove = fm10k_remove,
	.driver = {
		.pm = &fm10k_pm_ops,
	},
	.sriov_configure = fm10k_iov_configure,
	.err_handler = &fm10k_err_handler
};

/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}