/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
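/* i.e. wait for up to BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 10 s
 * in total before giving up on the other function's BIST.
 */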

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static bool separate_tx_channels;
module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
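
/* Illustrative usage (assuming this file is built into the "sfc" module):
 *
 *   modprobe sfc rss_cpus=4 separate_tx_channels=1
 *
 * Parameters declared with mode 0644 above should also be writable at
 * runtime via /sys/module/sfc/parameters/.
 */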

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	if (!efx_channel_lock_napi(channel))
		return budget;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
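		/* Adaptive IRQ moderation: once every 1000 interrupts on
		 * this channel, compare the accumulated event score against
		 * the module thresholds and step the moderation value down
		 * (more interrupts, lower latency) or up (fewer interrupts,
		 * less CPU), then restart the counters.
		 */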
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_nic_eventq_read_ack(channel);
	}

	efx_channel_unlock_napi(channel);
	return spent;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
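	/* For example, with 1024-entry RX and TX rings (a common default)
	 * this rounds 1024 + 1024 + 128 = 2176 up to 4096 events.
	 */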
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	efx_channel_enable(channel);
	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	while (!efx_channel_disable(channel))
		usleep_range(1000, 20000);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}
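
/* Channel names become "<ifname>-<n>" when TX and RX share channels, or
 * "<ifname>-rx-<n>" / "<ifname>-tx-<n>" when separate_tx_channels is set
 * (e.g. "eth0-tx-0"); see the snprintf() format below.
 */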
static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shut down and reinitialised while the NIC is running
 * to propagate configuration changes (MTU, checksum offload), or
 * to clear hardware error conditions.
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;
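
	/* Strategy: clone every channel whose type allows it, swap the
	 * clones in along with the new ring sizes, and re-probe them.
	 * On failure, swap the originals back; either way, whatever is
	 * left in other_channel[] is destroyed at 'out'.
	 */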
	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		netif_device_attach(efx->net_dev);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; carrier-off also keeps
 * the port's TX queue stopped while the link is down.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
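		/* Pause alone requests symmetric flow control;
		 * Asym_Pause flips the TX direction, so Pause|Asym_Pause
		 * means RX only and Asym_Pause alone means TX only.
		 */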
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
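		/* Mirror of the mapping in efx_link_set_advertising():
		 * flipping Asym_Pause when TX flow control is wanted turns
		 * Pause|Asym_Pause into plain Pause (symmetric) and
		 * no-pause into Asym_Pause alone (TX only).
		 */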
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);
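
/* PCI functions that share a controller report the same VPD serial
 * number. The first such function to probe becomes the primary and
 * later ones are linked onto its secondary_list; functions probed
 * before their primary wait on efx_unassociated_list.
 */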
static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
	       left->vpd_sn && right->vpd_sn &&
	       !strcmp(left->vpd_sn, right->vpd_sn);
}

static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
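
		/* Count one channel per physical core: merging each CPU's
		 * thread-sibling mask into thread_mask ensures that
		 * hyperthread siblings are counted only once.
		 */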
  1101. for_each_online_cpu(cpu) {
  1102. if (!cpumask_test_cpu(cpu, thread_mask)) {
  1103. ++count;
  1104. cpumask_or(thread_mask, thread_mask,
  1105. topology_thread_cpumask(cpu));
  1106. }
  1107. }
  1108. free_cpumask_var(thread_mask);
  1109. }
  1110. /* If RSS is requested for the PF *and* VFs then we can't write RSS
  1111. * table entries that are inaccessible to VFs
  1112. */
  1113. if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
  1114. count > efx_vf_size(efx)) {
  1115. netif_warn(efx, probe, efx->net_dev,
  1116. "Reducing number of RSS channels from %u to %u for "
  1117. "VF support. Increase vf-msix-limit to use more "
  1118. "channels on the PF.\n",
  1119. count, efx_vf_size(efx));
  1120. count = efx_vf_size(efx);
  1121. }
  1122. return count;
  1123. }

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 ||
			    !efx->type->sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}
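
/* Soft-enable interrupt handling: bring up the event queues that are
 * not kept across resets, start event processing on every channel and
 * switch MCDI completions to event mode.  Paired with
 * efx_soft_disable_interrupts() below.
 */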
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}
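
/* Fully enable interrupts: re-enable the legacy IRQ if EEH disabled it,
 * unmask the master interrupt, initialise the event queues that persist
 * across resets (keep_eventq) and then soft-enable the rest.
 */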
static int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
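
/* Apply the channel and queue layout chosen by efx_probe_interrupts() to
 * the channel structures.
 */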
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}
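
/* Hardware-type specific probe: create the NIC, choose the interrupt and
 * channel layout, dimension hardware resources and seed the RSS state.
 */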
static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail1;

	efx_set_channels(efx);

	rc = efx->type->dimension_resources(efx);
	if (rc)
		goto fail2;

	if (efx->n_channels > 1)
		netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}
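
/* Create the hardware filter table and, when accelerated RFS is built in
 * and n-tuple filtering is supported, the flow ID array used for steering.
 */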
static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	spin_lock_init(&efx->filter_lock);

	rc = efx->type->filter_table_probe(efx);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
					   sizeof(*efx->rps_flow_id),
					   GFP_KERNEL);
		if (!efx->rps_flow_id) {
			efx->type->filter_table_remove(efx);
			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	kfree(efx->rps_flow_id);
#endif
	efx->type->filter_table_remove(efx);
}

static void efx_restore_filters(struct efx_nic *efx)
{
	efx->type->filter_table_restore(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

fail4:
	efx_remove_filters(efx);
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured.  Interrupts must already be enabled.  This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* If link state detection is normally event-driven, we have
	 * to poll now because we could have missed a change
	 */
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down.  Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled.  Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* update stats before we go down so we can accurately count
	 * rx_nodesc_drops
	 */
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Stop the kernel transmit interface.  This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
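
/* Convert a moderation interval in microseconds into hardware timer ticks
 * of quantum_ns nanoseconds each, rounding any non-zero interval up to at
 * least one tick.
 */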
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation.  Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_set_ts_config(efx, ifr);
	if (cmd == SIOCGHWTSTAMP)
		return efx_ptp_get_ts_config(efx, ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/
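
/* Attach a channel's NAPI context to the net device, add it to the
 * busy-poll hash and initialise the channel's poll lock.
 */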
static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
	napi_hash_add(&channel->napi_str);
	efx_channel_init_lock(channel);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev) {
		netif_napi_del(&channel->napi_str);
		napi_hash_del(&channel->napi_str);
	}
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed.  However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

#ifdef CONFIG_NET_RX_BUSY_POLL
static int efx_busy_poll(struct napi_struct *napi)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int budget = 4;
	int old_rx_packets, rx_packets;

	if (!netif_running(efx->net_dev))
		return LL_FLUSH_FAILED;

	if (!efx_channel_lock_poll(channel))
		return LL_FLUSH_BUSY;

	old_rx_packets = channel->rx_queue.rx_packets;
	efx_process_channel(channel, budget);

	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;

	/* There is no race condition with NAPI here.
	 * NAPI will automatically be rescheduled if it yielded during busy
	 * polling, because it was not able to take the lock and thus returned
	 * the full budget.
	 */
	efx_channel_unlock_poll(channel);

	return rx_packets;
}
#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	ether_addr_copy(net_dev->dev_addr, new_addr);
	efx->type->sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_farch_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_siena_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_siena_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_siena_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_siena_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= efx_busy_poll,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static const struct net_device_ops efx_ef10_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= efx_busy_poll,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
	     net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
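
/* Final stage of netdev setup: select the netdev ops for this hardware
 * generation, mark the device READY, register it with the kernel and
 * expose the phy_type attribute.
 */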
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
		net_dev->netdev_ops = &efx_ef10_netdev_ops;
		net_dev->priv_flags |= IFF_UNICAST_FLT;
	} else {
		net_dev->netdev_ops = &efx_farch_netdev_ops;
	}
	net_dev->ethtool_ops = &efx_ethtool_ops;
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested.  If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	efx_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	rtnl_lock();
	efx_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);

	rtnl_lock();
	unregister_netdevice(efx->net_dev);
	efx->state = STATE_UNINIT;
	rtnl_unlock();
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->prepare_flr(efx);

	efx_stop_all(efx);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released.  A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled.  If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->finish_flr(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;
	efx_restore_filters(efx);
	efx->type->sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled.  This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus.  In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev =
		of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));

	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}
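
/* An MC BIST reset has been requested by another function; poll for the
 * MC reboot that signals the end of the self-test before carrying on.
 */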
static void efx_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (efx_mcdi_poll_reboot(efx))
			goto out;
		msleep(BIST_WAIT_DELAY_MS);
	}

	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
	/* Either way unset the BIST flag.  If we found no reboot we probably
	 * won't recover, but we should try.
	 */
	efx->mc_bist_for_other_fn = false;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = ACCESS_ONCE(efx->reset_pending);
	method = fls(pending) - 1;

	if (method == RESET_TYPE_MC_BIST)
		efx_wait_for_bist_end(efx);

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now.  Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_MC_BIST:
	case RESET_TYPE_MCDI_TIMEOUT:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),	/* SFC9120 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),	/* SFC9140 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_int,
	.poll		= efx_port_dummy_op_poll,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
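
/* Fill in the statistics that the driver maintains in software (ring
 * truncations and dropped-skb counts) in the caller's stats array.
 */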
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work.  It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	efx_disable_interrupts(efx);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx_dissociate(efx);
	dev_close(efx->net_dev);
	efx_disable_interrupts(efx);
	rtnl_unlock();

	efx->type->sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int ro_start, ro_size, i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (ro_start < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);

	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	j = ro_size;
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
		return;
	}

	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
	if (!efx->vpd_sn)
		return;

	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

fail6:
	efx_nic_fini_interrupt(efx);
fail5:
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (efx->type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);
	if (rc)
		goto fail3;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx->type->sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_warn(efx, probe, efx->net_dev,
			   "pci_enable_pcie_error_reporting failed (%d)\n", rc);

	return 0;

fail4:
	efx_pci_remove_main(efx);
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
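
/* Power-management freeze: quiesce the datapath and interrupts without
 * tearing down driver state, so that efx_pm_thaw() can restart quickly.
 */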
static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);
	}

	rtnl_unlock();

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		rc = efx_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	rc = efx_pm_thaw(dev);
	return rc;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static struct pci_error_handlers efx_err_handlers = {
	.error_detected	= efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
	.err_handler	= &efx_err_handlers,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
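
/* A minimal usage sketch (the module name is assumed to be "sfc", per
 * KBUILD_MODNAME and the "sfc_reset" workqueue below):
 *
 *	modprobe sfc interrupt_mode=1
 *
 * caps the driver at MSI; because higher-numbered modes are less capable,
 * efx_init_struct() will never pick a mode more capable than this.
 */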
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	efx_fini_sriov();
err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);