ixgbe_ethtool.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
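
/* The IXGBE_STAT/IXGBE_NETDEV_STAT helpers below record the width and
 * byte offset of each counter so ixgbe_get_ethtool_stats() can copy
 * any of them out generically, without per-statistic code.
 */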
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
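
/* Note: ixgbe_get_strings() and ixgbe_get_ethtool_stats() both walk
 * this table in order, so the string layout and the data layout stay
 * in sync by construction.
 */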

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
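
/* The private flag names above are surfaced to userspace via
 * "ethtool --show-priv-flags <iface>" and toggled with
 * "ethtool --set-priv-flags".
 */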

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
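
/* ixgbe_get_link_ksettings - report link modes, port type and speed.
 * This backs the plain "ethtool <iface>" query: it converts the
 * MAC/PHY capability bits into the legacy u32 supported/advertising
 * masks, fills in port/autoneg/speed/duplex, and converts the masks
 * back into link-mode bitmaps on the way out.
 */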
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
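
/* ixgbe_set_link_ksettings - apply link changes from "ethtool -s".
 * Copper and multispeed-fiber parts may narrow the advertised speed
 * set (duplex forcing is not supported); all other media only accept
 * the fixed 10Gb/FULL configuration.
 */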
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;
		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
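
/* Flow-control (pause frame) state, queried with "ethtool -a". */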
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
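
/* Apply new flow-control settings from "ethtool -A". The requested
 * mode is staged in a local copy of hw->fc and only committed (with a
 * reinit or reset) if it actually differs from the current state.
 */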
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
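
/* Register dump for "ethtool -d". The buffer layout is fixed by the
 * regs_buff indices below, so entries must keep their positions (and
 * IXGBE_REGS_LEN must grow) for userspace decoders to stay correct.
 */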
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
						/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
						/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
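
/* EEPROM access for "ethtool -e" / "ethtool -E". ethtool works in
 * bytes while the device EEPROM is addressed in 16-bit words, hence
 * the offset/length shift-by-one arithmetic in the helpers below.
 */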
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
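
/* Driver identification for "ethtool -i". */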
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
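
/* ixgbe_set_ringparam - resize the descriptor rings ("ethtool -G").
 * If the interface is down only the ring counts are updated;
 * otherwise new resources are built in a temporary ring array first,
 * so an allocation failure leaves the old rings fully usable.
 */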
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
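
/* Fill the statistics array behind "ethtool -S".  Per-ring packet/byte
 * counters are read under the u64_stats seqcount and retried if a writer
 * interleaved, so 64-bit values are never torn on 32-bit hosts.
 */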
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
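
/* Emit one label per statistic, test, or private flag, in exactly the
 * order the corresponding data arrays are filled; e.g. the first Tx ring
 * contributes "tx_queue_0_packets" followed by "tx_queue_0_bytes".
 */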
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
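
/* Write each test pattern to the register, read it back, and compare the
 * result under the mask.  The original value is restored either way;
 * returning true reports a failure and stores the failing register in
 * *data.
 */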
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
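
/* Exercise the legacy INTx/MSI interrupt path: mask, force, and unmask
 * each cause bit in turn while watching test_icr for stray or missing
 * interrupts.  MSI-X configurations are skipped (see the NOTE below).
 */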
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
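
/* Tear down the dedicated loopback-test rings: quiesce the Rx and Tx DMA
 * engines first so no descriptor is still referenced, then reset the
 * adapter and release the ring resources.
 */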
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	hw->mac.ops.disable_rx(hw);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}

static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}

static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 need to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
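
/* Build a recognizable test frame: fill it with 0xFF, overwrite part of
 * the middle with 0xAA, and plant 0xBE/0xAF marker bytes that the check
 * routine below looks for to confirm the frame made it around the loop.
 */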
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
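
/* Reap completed loopback traffic: unmap and free every transmitted skb
 * whose descriptor reports DD set, then walk the Rx ring validating each
 * received buffer against the test pattern.  Returns how many frames came
 * back intact so the caller can compare against the number sent.
 */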
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop.
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
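
/* Entry point for "ethtool -t".  Results land in fixed slots: data[0]
 * registers, data[1] eeprom, data[2] interrupts, data[3] loopback and
 * data[4] link.  Offline mode (e.g. "ethtool -t eth0 offline") brings the
 * interface down around the destructive tests; online mode only checks
 * link.
 */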
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}

static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}
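
/* Apply "ethtool -C" interrupt coalescing settings, e.g.
 * "ethtool -C eth0 rx-usecs 10".  Values above 1 are stored shifted left
 * by two, matching the granularity of the EITR interval field the ITR is
 * eventually programmed into.
 */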
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
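
/* Look up a single Flow Director rule for "ethtool -n" and translate the
 * driver's formatted filter back into an ethtool_rx_flow_spec, including
 * the shared per-port mask and the drop/queue action.
 */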
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
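
/* Insert, replace, or (when input is NULL) delete a software Flow
 * Director entry while keeping fdir_filter_list sorted by sw_idx.  The
 * hardware filter is erased when a rule is deleted or its hash changed.
 */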
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input was given this was a delete; err is 0 if a rule was
	 * successfully found and removed from the list, else -EINVAL.
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
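
/* Map an ethtool flow type onto the ATR flow type the hardware
 * understands.  Returns 1 on success and 0 for anything we cannot
 * express; an IP_USER_FLOW proto of 0 is treated as plain IPv4 only when
 * its mask is zero too (fully wildcarded).
 */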
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}

static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* The ring_cookie is masked into a set of queues and ixgbe pools,
	 * or we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}

static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
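
/* Handler for "ethtool -N <dev> rx-flow-hash": the RSS engine can hash
 * only on src/dst IP plus src/dst L4 port, so any other field request is
 * rejected.  Toggling UDP 4-tuple hashing is committed to the MRQC (or
 * per-pool PFVFMRQC) register.
 */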
  2453. #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
  2454. IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
  2455. static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
  2456. struct ethtool_rxnfc *nfc)
  2457. {
  2458. u32 flags2 = adapter->flags2;
  2459. /*
  2460. * RSS does not support anything other than hashing
  2461. * to queues on src and dst IPs and ports
  2462. */
  2463. if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
  2464. RXH_L4_B_0_1 | RXH_L4_B_2_3))
  2465. return -EINVAL;
  2466. switch (nfc->flow_type) {
  2467. case TCP_V4_FLOW:
  2468. case TCP_V6_FLOW:
  2469. if (!(nfc->data & RXH_IP_SRC) ||
  2470. !(nfc->data & RXH_IP_DST) ||
  2471. !(nfc->data & RXH_L4_B_0_1) ||
  2472. !(nfc->data & RXH_L4_B_2_3))
  2473. return -EINVAL;
  2474. break;
  2475. case UDP_V4_FLOW:
  2476. if (!(nfc->data & RXH_IP_SRC) ||
  2477. !(nfc->data & RXH_IP_DST))
  2478. return -EINVAL;
  2479. switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
  2480. case 0:
  2481. flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
  2482. break;
  2483. case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
  2484. flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
  2485. break;
  2486. default:
  2487. return -EINVAL;
  2488. }
  2489. break;
  2490. case UDP_V6_FLOW:
  2491. if (!(nfc->data & RXH_IP_SRC) ||
  2492. !(nfc->data & RXH_IP_DST))
  2493. return -EINVAL;
  2494. switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
  2495. case 0:
  2496. flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
  2497. break;
  2498. case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
  2499. flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
  2500. break;
  2501. default:
  2502. return -EINVAL;
  2503. }
  2504. break;
  2505. case AH_ESP_V4_FLOW:
  2506. case AH_V4_FLOW:
  2507. case ESP_V4_FLOW:
  2508. case SCTP_V4_FLOW:
  2509. case AH_ESP_V6_FLOW:
  2510. case AH_V6_FLOW:
  2511. case ESP_V6_FLOW:
  2512. case SCTP_V6_FLOW:
  2513. if (!(nfc->data & RXH_IP_SRC) ||
  2514. !(nfc->data & RXH_IP_DST) ||
  2515. (nfc->data & RXH_L4_B_0_1) ||
  2516. (nfc->data & RXH_L4_B_2_3))
  2517. return -EINVAL;
  2518. break;
  2519. default:
  2520. return -EINVAL;
  2521. }
  2522. /* if we changed something we need to update flags */
  2523. if (flags2 != adapter->flags2) {
  2524. struct ixgbe_hw *hw = &adapter->hw;
  2525. u32 mrqc;
  2526. unsigned int pf_pool = adapter->num_vfs;
  2527. if ((hw->mac.type >= ixgbe_mac_X550) &&
  2528. (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  2529. mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
  2530. else
  2531. mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
  2532. if ((flags2 & UDP_RSS_FLAGS) &&
  2533. !(adapter->flags2 & UDP_RSS_FLAGS))
  2534. e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
  2535. adapter->flags2 = flags2;
  2536. /* Perform hash on these packet types */
  2537. mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
  2538. | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
  2539. | IXGBE_MRQC_RSS_FIELD_IPV6
  2540. | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
  2541. mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
  2542. IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
  2543. if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
  2544. mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
  2545. if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
  2546. mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
  2547. if ((hw->mac.type >= ixgbe_mac_X550) &&
  2548. (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  2549. IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
  2550. else
  2551. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  2552. }
  2553. return 0;
  2554. }
  2555. static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  2556. {
  2557. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2558. int ret = -EOPNOTSUPP;
  2559. switch (cmd->cmd) {
  2560. case ETHTOOL_SRXCLSRLINS:
  2561. ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
  2562. break;
  2563. case ETHTOOL_SRXCLSRLDEL:
  2564. ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
  2565. break;
  2566. case ETHTOOL_SRXFH:
  2567. ret = ixgbe_set_rss_hash_opt(adapter, cmd);
  2568. break;
  2569. default:
  2570. break;
  2571. }
  2572. return ret;
  2573. }
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

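/* Copy the driver's cached RSS redirection table out to ethtool,
 * masking each entry down to the number of RX queues currently in use.
 */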
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}

static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}

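/* ixgbe_set_rxfh - accept a new redirection table and/or RSS hash key
 * from ethtool, cache them in the adapter and program the hardware.
 * Only the default (Toeplitz) hash function is accepted.
 *
 * Illustrative usage (the device name is just an example):
 *
 *   ethtool -X eth0 equal 8      # spread flows evenly over queues 0-7
 *   ethtool -X eth0 hkey <key>   # install a new RSS hash key
 */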
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}

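/* Report timestamping capabilities (queried via "ethtool -T <dev>"):
 * software timestamping is always available, while the hardware RX
 * filters and the PHC index depend on the MAC generation and on
 * whether a PTP clock has been registered.
 */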
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}

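/* Upper bound on combined channels, in decreasing order of precedence:
 * a single queue without MSI-X, the pool queue mask under SR-IOV,
 * per-traffic-class limits under DCB, the Flow Director maximum when
 * ATR sampling is on, and otherwise the plain RSS maximum.
 */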
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}

static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}

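/* ixgbe_set_channels - handler for "ethtool -L <dev> combined N".
 * Only symmetric (combined) channels are accepted; the request is
 * clamped against the per-feature limits and then applied through a
 * traffic-class reconfiguration.
 */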
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}

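/* Identify the plugged SFP module for "ethtool -m <dev>": probe the
 * SFF-8472 compliance byte to decide whether only the lower A0 page
 * (SFF-8079) or the full A0+A2 diagnostic space (SFF-8472) can be
 * dumped.
 */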
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take a long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}

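/* Lookup tables translating between MAC link-speed bits, firmware EEE
 * capability bits and the ethtool SUPPORTED_* representation used by
 * the EEE handlers below.
 */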
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
};

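/* Build the ethtool EEE report from a firmware PHY activity query:
 * info[0] carries the link partner's advertised EEE abilities, while
 * the locally supported and advertised sets come from cached PHY state.
 */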
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	s32 rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;

	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}

static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}

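/* ixgbe_set_eee - handler for "ethtool --set-eee <dev> eee on|off".
 * Only toggling EEE as a whole is supported; a disable request that
 * also tries to change tx-lpi, the LPI timer or the advertised speeds
 * is rejected.  A link reset applies the new setting.
 */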
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

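/* "legacy-rx" private flag ("ethtool --set-priv-flags <dev> legacy-rx on"):
 * selects the driver's older RX buffer handling path instead of the
 * default build_skb-based one; toggling it reinitializes the queues.
 */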
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_rxfh_indir_size    = ixgbe_rss_indir_size,
	.get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
	.get_rxfh               = ixgbe_get_rxfh,
	.set_rxfh               = ixgbe_set_rxfh,
	.get_eee                = ixgbe_get_eee,
	.set_eee                = ixgbe_set_eee,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_priv_flags         = ixgbe_get_priv_flags,
	.set_priv_flags         = ixgbe_set_priv_flags,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}