/* drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
 * (web-scrape page chrome and line-number residue removed)
 */
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/ethtool.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/mlx4/driver.h>
  37. #include <linux/mlx4/device.h>
  38. #include <linux/in.h>
  39. #include <net/ip.h>
  40. #include <linux/bitmap.h>
  41. #include "mlx4_en.h"
  42. #include "en_port.h"
  43. #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
  44. #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
  45. #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
  46. static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
  47. {
  48. int i, t;
  49. int err = 0;
  50. for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
  51. for (i = 0; i < priv->tx_ring_num[t]; i++) {
  52. priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
  53. priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
  54. if (priv->port_up) {
  55. err = mlx4_en_set_cq_moder(priv,
  56. priv->tx_cq[t][i]);
  57. if (err)
  58. return err;
  59. }
  60. }
  61. }
  62. if (priv->adaptive_rx_coal)
  63. return 0;
  64. for (i = 0; i < priv->rx_ring_num; i++) {
  65. priv->rx_cq[i]->moder_cnt = priv->rx_frames;
  66. priv->rx_cq[i]->moder_time = priv->rx_usecs;
  67. priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
  68. if (priv->port_up) {
  69. err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
  70. if (err)
  71. return err;
  72. }
  73. }
  74. return err;
  75. }
  76. static void
  77. mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
  78. {
  79. struct mlx4_en_priv *priv = netdev_priv(dev);
  80. struct mlx4_en_dev *mdev = priv->mdev;
  81. strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
  82. strlcpy(drvinfo->version, DRV_VERSION,
  83. sizeof(drvinfo->version));
  84. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  85. "%d.%d.%d",
  86. (u16) (mdev->dev->caps.fw_ver >> 32),
  87. (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
  88. (u16) (mdev->dev->caps.fw_ver & 0xffff));
  89. strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
  90. sizeof(drvinfo->bus_info));
  91. }
  92. static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
  93. "blueflame",
  94. "phv-bit"
  95. };
  96. static const char main_strings[][ETH_GSTRING_LEN] = {
  97. /* main statistics */
  98. "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
  99. "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
  100. "rx_length_errors", "rx_over_errors", "rx_crc_errors",
  101. "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
  102. "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
  103. "tx_heartbeat_errors", "tx_window_errors",
  104. /* port statistics */
  105. "tso_packets",
  106. "xmit_more",
  107. "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
  108. "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
  109. /* pf statistics */
  110. "pf_rx_packets",
  111. "pf_rx_bytes",
  112. "pf_tx_packets",
  113. "pf_tx_bytes",
  114. /* priority flow control statistics rx */
  115. "rx_pause_prio_0", "rx_pause_duration_prio_0",
  116. "rx_pause_transition_prio_0",
  117. "rx_pause_prio_1", "rx_pause_duration_prio_1",
  118. "rx_pause_transition_prio_1",
  119. "rx_pause_prio_2", "rx_pause_duration_prio_2",
  120. "rx_pause_transition_prio_2",
  121. "rx_pause_prio_3", "rx_pause_duration_prio_3",
  122. "rx_pause_transition_prio_3",
  123. "rx_pause_prio_4", "rx_pause_duration_prio_4",
  124. "rx_pause_transition_prio_4",
  125. "rx_pause_prio_5", "rx_pause_duration_prio_5",
  126. "rx_pause_transition_prio_5",
  127. "rx_pause_prio_6", "rx_pause_duration_prio_6",
  128. "rx_pause_transition_prio_6",
  129. "rx_pause_prio_7", "rx_pause_duration_prio_7",
  130. "rx_pause_transition_prio_7",
  131. /* flow control statistics rx */
  132. "rx_pause", "rx_pause_duration", "rx_pause_transition",
  133. /* priority flow control statistics tx */
  134. "tx_pause_prio_0", "tx_pause_duration_prio_0",
  135. "tx_pause_transition_prio_0",
  136. "tx_pause_prio_1", "tx_pause_duration_prio_1",
  137. "tx_pause_transition_prio_1",
  138. "tx_pause_prio_2", "tx_pause_duration_prio_2",
  139. "tx_pause_transition_prio_2",
  140. "tx_pause_prio_3", "tx_pause_duration_prio_3",
  141. "tx_pause_transition_prio_3",
  142. "tx_pause_prio_4", "tx_pause_duration_prio_4",
  143. "tx_pause_transition_prio_4",
  144. "tx_pause_prio_5", "tx_pause_duration_prio_5",
  145. "tx_pause_transition_prio_5",
  146. "tx_pause_prio_6", "tx_pause_duration_prio_6",
  147. "tx_pause_transition_prio_6",
  148. "tx_pause_prio_7", "tx_pause_duration_prio_7",
  149. "tx_pause_transition_prio_7",
  150. /* flow control statistics tx */
  151. "tx_pause", "tx_pause_duration", "tx_pause_transition",
  152. /* packet statistics */
  153. "rx_multicast_packets",
  154. "rx_broadcast_packets",
  155. "rx_jabbers",
  156. "rx_in_range_length_error",
  157. "rx_out_range_length_error",
  158. "tx_multicast_packets",
  159. "tx_broadcast_packets",
  160. "rx_prio_0_packets", "rx_prio_0_bytes",
  161. "rx_prio_1_packets", "rx_prio_1_bytes",
  162. "rx_prio_2_packets", "rx_prio_2_bytes",
  163. "rx_prio_3_packets", "rx_prio_3_bytes",
  164. "rx_prio_4_packets", "rx_prio_4_bytes",
  165. "rx_prio_5_packets", "rx_prio_5_bytes",
  166. "rx_prio_6_packets", "rx_prio_6_bytes",
  167. "rx_prio_7_packets", "rx_prio_7_bytes",
  168. "rx_novlan_packets", "rx_novlan_bytes",
  169. "tx_prio_0_packets", "tx_prio_0_bytes",
  170. "tx_prio_1_packets", "tx_prio_1_bytes",
  171. "tx_prio_2_packets", "tx_prio_2_bytes",
  172. "tx_prio_3_packets", "tx_prio_3_bytes",
  173. "tx_prio_4_packets", "tx_prio_4_bytes",
  174. "tx_prio_5_packets", "tx_prio_5_bytes",
  175. "tx_prio_6_packets", "tx_prio_6_bytes",
  176. "tx_prio_7_packets", "tx_prio_7_bytes",
  177. "tx_novlan_packets", "tx_novlan_bytes",
  178. /* xdp statistics */
  179. "rx_xdp_drop",
  180. "rx_xdp_tx",
  181. "rx_xdp_tx_full",
  182. };
  183. static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
  184. "Interrupt Test",
  185. "Link Test",
  186. "Speed Test",
  187. "Register Test",
  188. "Loopback Test",
  189. };
  190. static u32 mlx4_en_get_msglevel(struct net_device *dev)
  191. {
  192. return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
  193. }
  194. static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
  195. {
  196. ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
  197. }
  198. static void mlx4_en_get_wol(struct net_device *netdev,
  199. struct ethtool_wolinfo *wol)
  200. {
  201. struct mlx4_en_priv *priv = netdev_priv(netdev);
  202. struct mlx4_caps *caps = &priv->mdev->dev->caps;
  203. int err = 0;
  204. u64 config = 0;
  205. u64 mask;
  206. if ((priv->port < 1) || (priv->port > 2)) {
  207. en_err(priv, "Failed to get WoL information\n");
  208. return;
  209. }
  210. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  211. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  212. if (!(caps->flags & mask)) {
  213. wol->supported = 0;
  214. wol->wolopts = 0;
  215. return;
  216. }
  217. if (caps->wol_port[priv->port])
  218. wol->supported = WAKE_MAGIC;
  219. else
  220. wol->supported = 0;
  221. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  222. if (err) {
  223. en_err(priv, "Failed to get WoL information\n");
  224. return;
  225. }
  226. if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
  227. wol->wolopts = WAKE_MAGIC;
  228. else
  229. wol->wolopts = 0;
  230. }
  231. static int mlx4_en_set_wol(struct net_device *netdev,
  232. struct ethtool_wolinfo *wol)
  233. {
  234. struct mlx4_en_priv *priv = netdev_priv(netdev);
  235. u64 config = 0;
  236. int err = 0;
  237. u64 mask;
  238. if ((priv->port < 1) || (priv->port > 2))
  239. return -EOPNOTSUPP;
  240. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  241. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  242. if (!(priv->mdev->dev->caps.flags & mask))
  243. return -EOPNOTSUPP;
  244. if (wol->supported & ~WAKE_MAGIC)
  245. return -EINVAL;
  246. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  247. if (err) {
  248. en_err(priv, "Failed to get WoL info, unable to modify\n");
  249. return err;
  250. }
  251. if (wol->wolopts & WAKE_MAGIC) {
  252. config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
  253. MLX4_EN_WOL_MAGIC;
  254. } else {
  255. config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
  256. config |= MLX4_EN_WOL_DO_MODIFY;
  257. }
  258. err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
  259. if (err)
  260. en_err(priv, "Failed to set WoL information\n");
  261. return err;
  262. }
  263. struct bitmap_iterator {
  264. unsigned long *stats_bitmap;
  265. unsigned int count;
  266. unsigned int iterator;
  267. bool advance_array; /* if set, force no increments */
  268. };
  269. static inline void bitmap_iterator_init(struct bitmap_iterator *h,
  270. unsigned long *stats_bitmap,
  271. int count)
  272. {
  273. h->iterator = 0;
  274. h->advance_array = !bitmap_empty(stats_bitmap, count);
  275. h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
  276. : count;
  277. h->stats_bitmap = stats_bitmap;
  278. }
  279. static inline int bitmap_iterator_test(struct bitmap_iterator *h)
  280. {
  281. return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
  282. }
  283. static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
  284. {
  285. return h->iterator++;
  286. }
  287. static inline unsigned int
  288. bitmap_iterator_count(struct bitmap_iterator *h)
  289. {
  290. return h->count;
  291. }
  292. static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
  293. {
  294. struct mlx4_en_priv *priv = netdev_priv(dev);
  295. struct bitmap_iterator it;
  296. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  297. switch (sset) {
  298. case ETH_SS_STATS:
  299. return bitmap_iterator_count(&it) +
  300. (priv->tx_ring_num[TX] * 2) +
  301. (priv->rx_ring_num * (3 + NUM_XDP_STATS));
  302. case ETH_SS_TEST:
  303. return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
  304. & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
  305. case ETH_SS_PRIV_FLAGS:
  306. return ARRAY_SIZE(mlx4_en_priv_flags);
  307. default:
  308. return -EOPNOTSUPP;
  309. }
  310. }
  311. static void mlx4_en_get_ethtool_stats(struct net_device *dev,
  312. struct ethtool_stats *stats, uint64_t *data)
  313. {
  314. struct mlx4_en_priv *priv = netdev_priv(dev);
  315. int index = 0;
  316. int i;
  317. struct bitmap_iterator it;
  318. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  319. spin_lock_bh(&priv->stats_lock);
  320. mlx4_en_fold_software_stats(dev);
  321. for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
  322. if (bitmap_iterator_test(&it))
  323. data[index++] = ((unsigned long *)&dev->stats)[i];
  324. for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
  325. if (bitmap_iterator_test(&it))
  326. data[index++] = ((unsigned long *)&priv->port_stats)[i];
  327. for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
  328. if (bitmap_iterator_test(&it))
  329. data[index++] =
  330. ((unsigned long *)&priv->pf_stats)[i];
  331. for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
  332. i++, bitmap_iterator_inc(&it))
  333. if (bitmap_iterator_test(&it))
  334. data[index++] =
  335. ((u64 *)&priv->rx_priority_flowstats)[i];
  336. for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
  337. if (bitmap_iterator_test(&it))
  338. data[index++] = ((u64 *)&priv->rx_flowstats)[i];
  339. for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
  340. i++, bitmap_iterator_inc(&it))
  341. if (bitmap_iterator_test(&it))
  342. data[index++] =
  343. ((u64 *)&priv->tx_priority_flowstats)[i];
  344. for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
  345. if (bitmap_iterator_test(&it))
  346. data[index++] = ((u64 *)&priv->tx_flowstats)[i];
  347. for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
  348. if (bitmap_iterator_test(&it))
  349. data[index++] = ((unsigned long *)&priv->pkstats)[i];
  350. for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
  351. if (bitmap_iterator_test(&it))
  352. data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
  353. for (i = 0; i < priv->tx_ring_num[TX]; i++) {
  354. data[index++] = priv->tx_ring[TX][i]->packets;
  355. data[index++] = priv->tx_ring[TX][i]->bytes;
  356. }
  357. for (i = 0; i < priv->rx_ring_num; i++) {
  358. data[index++] = priv->rx_ring[i]->packets;
  359. data[index++] = priv->rx_ring[i]->bytes;
  360. data[index++] = priv->rx_ring[i]->dropped;
  361. data[index++] = priv->rx_ring[i]->xdp_drop;
  362. data[index++] = priv->rx_ring[i]->xdp_tx;
  363. data[index++] = priv->rx_ring[i]->xdp_tx_full;
  364. }
  365. spin_unlock_bh(&priv->stats_lock);
  366. }
  367. static void mlx4_en_self_test(struct net_device *dev,
  368. struct ethtool_test *etest, u64 *buf)
  369. {
  370. mlx4_en_ex_selftest(dev, &etest->flags, buf);
  371. }
  372. static void mlx4_en_get_strings(struct net_device *dev,
  373. uint32_t stringset, uint8_t *data)
  374. {
  375. struct mlx4_en_priv *priv = netdev_priv(dev);
  376. int index = 0;
  377. int i, strings = 0;
  378. struct bitmap_iterator it;
  379. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  380. switch (stringset) {
  381. case ETH_SS_TEST:
  382. for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
  383. strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
  384. if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
  385. for (; i < MLX4_EN_NUM_SELF_TEST; i++)
  386. strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
  387. break;
  388. case ETH_SS_STATS:
  389. /* Add main counters */
  390. for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
  391. bitmap_iterator_inc(&it))
  392. if (bitmap_iterator_test(&it))
  393. strcpy(data + (index++) * ETH_GSTRING_LEN,
  394. main_strings[strings]);
  395. for (i = 0; i < NUM_PORT_STATS; i++, strings++,
  396. bitmap_iterator_inc(&it))
  397. if (bitmap_iterator_test(&it))
  398. strcpy(data + (index++) * ETH_GSTRING_LEN,
  399. main_strings[strings]);
  400. for (i = 0; i < NUM_PF_STATS; i++, strings++,
  401. bitmap_iterator_inc(&it))
  402. if (bitmap_iterator_test(&it))
  403. strcpy(data + (index++) * ETH_GSTRING_LEN,
  404. main_strings[strings]);
  405. for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
  406. bitmap_iterator_inc(&it))
  407. if (bitmap_iterator_test(&it))
  408. strcpy(data + (index++) * ETH_GSTRING_LEN,
  409. main_strings[strings]);
  410. for (i = 0; i < NUM_PKT_STATS; i++, strings++,
  411. bitmap_iterator_inc(&it))
  412. if (bitmap_iterator_test(&it))
  413. strcpy(data + (index++) * ETH_GSTRING_LEN,
  414. main_strings[strings]);
  415. for (i = 0; i < NUM_XDP_STATS; i++, strings++,
  416. bitmap_iterator_inc(&it))
  417. if (bitmap_iterator_test(&it))
  418. strcpy(data + (index++) * ETH_GSTRING_LEN,
  419. main_strings[strings]);
  420. for (i = 0; i < priv->tx_ring_num[TX]; i++) {
  421. sprintf(data + (index++) * ETH_GSTRING_LEN,
  422. "tx%d_packets", i);
  423. sprintf(data + (index++) * ETH_GSTRING_LEN,
  424. "tx%d_bytes", i);
  425. }
  426. for (i = 0; i < priv->rx_ring_num; i++) {
  427. sprintf(data + (index++) * ETH_GSTRING_LEN,
  428. "rx%d_packets", i);
  429. sprintf(data + (index++) * ETH_GSTRING_LEN,
  430. "rx%d_bytes", i);
  431. sprintf(data + (index++) * ETH_GSTRING_LEN,
  432. "rx%d_dropped", i);
  433. sprintf(data + (index++) * ETH_GSTRING_LEN,
  434. "rx%d_xdp_drop", i);
  435. sprintf(data + (index++) * ETH_GSTRING_LEN,
  436. "rx%d_xdp_tx", i);
  437. sprintf(data + (index++) * ETH_GSTRING_LEN,
  438. "rx%d_xdp_tx_full", i);
  439. }
  440. break;
  441. case ETH_SS_PRIV_FLAGS:
  442. for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
  443. strcpy(data + i * ETH_GSTRING_LEN,
  444. mlx4_en_priv_flags[i]);
  445. break;
  446. }
  447. }
  448. static u32 mlx4_en_autoneg_get(struct net_device *dev)
  449. {
  450. struct mlx4_en_priv *priv = netdev_priv(dev);
  451. struct mlx4_en_dev *mdev = priv->mdev;
  452. u32 autoneg = AUTONEG_DISABLE;
  453. if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
  454. (priv->port_state.flags & MLX4_EN_PORT_ANE))
  455. autoneg = AUTONEG_ENABLE;
  456. return autoneg;
  457. }
  458. static void ptys2ethtool_update_supported_port(unsigned long *mask,
  459. struct mlx4_ptys_reg *ptys_reg)
  460. {
  461. u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
  462. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
  463. | MLX4_PROT_MASK(MLX4_1000BASE_T)
  464. | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
  465. __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
  466. } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
  467. | MLX4_PROT_MASK(MLX4_10GBASE_SR)
  468. | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
  469. | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
  470. | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
  471. | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
  472. __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
  473. } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
  474. | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
  475. | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
  476. | MLX4_PROT_MASK(MLX4_10GBASE_KR)
  477. | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
  478. | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
  479. __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
  480. }
  481. }
  482. static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
  483. {
  484. u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
  485. if (!eth_proto) /* link down */
  486. eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
  487. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
  488. | MLX4_PROT_MASK(MLX4_1000BASE_T)
  489. | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
  490. return PORT_TP;
  491. }
  492. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
  493. | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
  494. | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
  495. | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
  496. return PORT_FIBRE;
  497. }
  498. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
  499. | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
  500. | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
  501. return PORT_DA;
  502. }
  503. if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
  504. | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
  505. | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
  506. | MLX4_PROT_MASK(MLX4_10GBASE_KR)
  507. | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
  508. | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
  509. return PORT_NONE;
  510. }
  511. return PORT_OTHER;
  512. }
  513. #define MLX4_LINK_MODES_SZ \
  514. (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
  515. enum ethtool_report {
  516. SUPPORTED = 0,
  517. ADVERTISED = 1,
  518. };
  519. struct ptys2ethtool_config {
  520. __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
  521. __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
  522. u32 speed;
  523. };
  524. static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
  525. enum ethtool_report report)
  526. {
  527. switch (report) {
  528. case SUPPORTED:
  529. return cfg->supported;
  530. case ADVERTISED:
  531. return cfg->advertised;
  532. }
  533. return NULL;
  534. }
  535. #define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
  536. ({ \
  537. struct ptys2ethtool_config *cfg; \
  538. const unsigned int modes[] = { __VA_ARGS__ }; \
  539. unsigned int i; \
  540. cfg = &ptys2ethtool_map[reg_]; \
  541. cfg->speed = speed_; \
  542. bitmap_zero(cfg->supported, \
  543. __ETHTOOL_LINK_MODE_MASK_NBITS); \
  544. bitmap_zero(cfg->advertised, \
  545. __ETHTOOL_LINK_MODE_MASK_NBITS); \
  546. for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
  547. __set_bit(modes[i], cfg->supported); \
  548. __set_bit(modes[i], cfg->advertised); \
  549. } \
  550. })
  551. /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
  552. static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
  553. void __init mlx4_en_init_ptys2ethtool_map(void)
  554. {
  555. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
  556. ETHTOOL_LINK_MODE_100baseT_Full_BIT);
  557. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
  558. ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
  559. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
  560. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  561. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
  562. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  563. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
  564. ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
  565. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
  566. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  567. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
  568. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  569. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
  570. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  571. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
  572. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  573. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
  574. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  575. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
  576. ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
  577. ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
  578. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
  579. ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
  580. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
  581. ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
  582. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
  583. ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
  584. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
  585. ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
  586. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
  587. ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
  588. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
  589. ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
  590. };
  591. static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
  592. u32 eth_proto,
  593. enum ethtool_report report)
  594. {
  595. int i;
  596. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  597. if (eth_proto & MLX4_PROT_MASK(i))
  598. bitmap_or(link_modes, link_modes,
  599. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  600. report),
  601. __ETHTOOL_LINK_MODE_MASK_NBITS);
  602. }
  603. }
  604. static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
  605. enum ethtool_report report)
  606. {
  607. int i;
  608. u32 ptys_modes = 0;
  609. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  610. if (bitmap_intersects(
  611. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  612. report),
  613. link_modes,
  614. __ETHTOOL_LINK_MODE_MASK_NBITS))
  615. ptys_modes |= 1 << i;
  616. }
  617. return ptys_modes;
  618. }
  619. /* Convert actual speed (SPEED_XXX) to ptys link modes */
  620. static u32 speed2ptys_link_modes(u32 speed)
  621. {
  622. int i;
  623. u32 ptys_modes = 0;
  624. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  625. if (ptys2ethtool_map[i].speed == speed)
  626. ptys_modes |= 1 << i;
  627. }
  628. return ptys_modes;
  629. }
  630. static int
  631. ethtool_get_ptys_link_ksettings(struct net_device *dev,
  632. struct ethtool_link_ksettings *link_ksettings)
  633. {
  634. struct mlx4_en_priv *priv = netdev_priv(dev);
  635. struct mlx4_ptys_reg ptys_reg;
  636. u32 eth_proto;
  637. int ret;
  638. memset(&ptys_reg, 0, sizeof(ptys_reg));
  639. ptys_reg.local_port = priv->port;
  640. ptys_reg.proto_mask = MLX4_PTYS_EN;
  641. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
  642. MLX4_ACCESS_REG_QUERY, &ptys_reg);
  643. if (ret) {
  644. en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
  645. ret);
  646. return ret;
  647. }
  648. en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
  649. ptys_reg.proto_mask);
  650. en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
  651. be32_to_cpu(ptys_reg.eth_proto_cap));
  652. en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
  653. be32_to_cpu(ptys_reg.eth_proto_admin));
  654. en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
  655. be32_to_cpu(ptys_reg.eth_proto_oper));
  656. en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
  657. be32_to_cpu(ptys_reg.eth_proto_lp_adv));
  658. /* reset supported/advertising masks */
  659. ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
  660. ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
  661. ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
  662. &ptys_reg);
  663. eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
  664. ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
  665. eth_proto, SUPPORTED);
  666. eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
  667. ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
  668. eth_proto, ADVERTISED);
  669. ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
  670. Pause);
  671. ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
  672. Asym_Pause);
  673. if (priv->prof->tx_pause)
  674. ethtool_link_ksettings_add_link_mode(link_ksettings,
  675. advertising, Pause);
  676. if (priv->prof->tx_pause ^ priv->prof->rx_pause)
  677. ethtool_link_ksettings_add_link_mode(link_ksettings,
  678. advertising, Asym_Pause);
  679. link_ksettings->base.port = ptys_get_active_port(&ptys_reg);
  680. if (mlx4_en_autoneg_get(dev)) {
  681. ethtool_link_ksettings_add_link_mode(link_ksettings,
  682. supported, Autoneg);
  683. ethtool_link_ksettings_add_link_mode(link_ksettings,
  684. advertising, Autoneg);
  685. }
  686. link_ksettings->base.autoneg
  687. = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
  688. AUTONEG_ENABLE : AUTONEG_DISABLE;
  689. eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
  690. ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
  691. ptys2ethtool_update_link_modes(
  692. link_ksettings->link_modes.lp_advertising,
  693. eth_proto, ADVERTISED);
  694. if (priv->port_state.flags & MLX4_EN_PORT_ANC)
  695. ethtool_link_ksettings_add_link_mode(link_ksettings,
  696. lp_advertising, Autoneg);
  697. link_ksettings->base.phy_address = 0;
  698. link_ksettings->base.mdio_support = 0;
  699. link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
  700. link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
  701. return ret;
  702. }
/* Populate @link_ksettings with conservative defaults (10GbaseT full
 * duplex, autoneg off) for devices where the PTYS register is not
 * usable.  The port type is guessed from the queried transceiver code.
 */
static void
ethtool_get_default_link_ksettings(
        struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int trans_type;

        link_ksettings->base.autoneg = AUTONEG_DISABLE;

        ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
                                             10000baseT_Full);

        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
        ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
                                             10000baseT_Full);

        trans_type = priv->port_state.transceiver;
        if (trans_type > 0 && trans_type <= 0xC) {
                /* NOTE(review): codes 0x1-0xC are presumably the fibre
                 * module types reported by firmware - confirm against the
                 * PRM transceiver-type table.
                 */
                link_ksettings->base.port = PORT_FIBRE;
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     supported, FIBRE);
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     advertising, FIBRE);
        } else if (trans_type == 0x80 || trans_type == 0) {
                /* 0x80 / 0 are treated as twisted pair */
                link_ksettings->base.port = PORT_TP;
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     supported, TP);
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     advertising, TP);
        } else {
                /* Unknown transceiver code */
                link_ksettings->base.port = -1;
        }
}
/* ethtool get_link_ksettings entry point.  Refreshes the port state,
 * then fills the settings from the PTYS register when the device has
 * ETH protocol control, falling back to defaults otherwise.
 */
static int
mlx4_en_get_link_ksettings(struct net_device *dev,
                           struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int ret = -EINVAL;

        /* NOTE(review): any QUERY_PORT failure is reported as -ENOMEM
         * regardless of the underlying error - confirm whether this is
         * intentional.
         */
        if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
                return -ENOMEM;

        en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
               priv->port_state.flags & MLX4_EN_PORT_ANC,
               priv->port_state.flags & MLX4_EN_PORT_ANE);

        if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
                ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
        if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
                ethtool_get_default_link_ksettings(dev, link_ksettings);

        /* Speed/duplex are meaningful only while the carrier is up */
        if (netif_carrier_ok(dev)) {
                link_ksettings->base.speed = priv->port_state.link_speed;
                link_ksettings->base.duplex = DUPLEX_FULL;
        } else {
                link_ksettings->base.speed = SPEED_UNKNOWN;
                link_ksettings->base.duplex = DUPLEX_UNKNOWN;
        }
        return 0;
}
  757. /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
  758. static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
  759. __be32 proto_cap)
  760. {
  761. __be32 proto_admin = 0;
  762. if (!speed) { /* Speed = 0 ==> Reset Link modes */
  763. proto_admin = proto_cap;
  764. en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
  765. be32_to_cpu(proto_cap));
  766. } else {
  767. u32 ptys_link_modes = speed2ptys_link_modes(speed);
  768. proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
  769. en_info(priv, "Setting Speed to %d\n", speed);
  770. }
  771. return proto_admin;
  772. }
  773. static int
  774. mlx4_en_set_link_ksettings(struct net_device *dev,
  775. const struct ethtool_link_ksettings *link_ksettings)
  776. {
  777. struct mlx4_en_priv *priv = netdev_priv(dev);
  778. struct mlx4_ptys_reg ptys_reg;
  779. __be32 proto_admin;
  780. u8 cur_autoneg;
  781. int ret;
  782. u32 ptys_adv = ethtool2ptys_link_modes(
  783. link_ksettings->link_modes.advertising, ADVERTISED);
  784. const int speed = link_ksettings->base.speed;
  785. en_dbg(DRV, priv,
  786. "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
  787. speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
  788. link_ksettings->link_modes.advertising,
  789. link_ksettings->base.autoneg,
  790. link_ksettings->base.duplex);
  791. if (!(priv->mdev->dev->caps.flags2 &
  792. MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
  793. (link_ksettings->base.duplex == DUPLEX_HALF))
  794. return -EINVAL;
  795. memset(&ptys_reg, 0, sizeof(ptys_reg));
  796. ptys_reg.local_port = priv->port;
  797. ptys_reg.proto_mask = MLX4_PTYS_EN;
  798. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
  799. MLX4_ACCESS_REG_QUERY, &ptys_reg);
  800. if (ret) {
  801. en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
  802. ret);
  803. return 0;
  804. }
  805. cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
  806. AUTONEG_DISABLE : AUTONEG_ENABLE;
  807. if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
  808. proto_admin = speed_set_ptys_admin(priv, speed,
  809. ptys_reg.eth_proto_cap);
  810. if ((be32_to_cpu(proto_admin) &
  811. (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
  812. MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
  813. (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
  814. ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
  815. } else {
  816. proto_admin = cpu_to_be32(ptys_adv);
  817. ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
  818. }
  819. proto_admin &= ptys_reg.eth_proto_cap;
  820. if (!proto_admin) {
  821. en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
  822. return -EINVAL; /* nothing to change due to bad input */
  823. }
  824. if ((proto_admin == ptys_reg.eth_proto_admin) &&
  825. ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
  826. (link_ksettings->base.autoneg == cur_autoneg)))
  827. return 0; /* Nothing to change */
  828. en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
  829. be32_to_cpu(proto_admin));
  830. ptys_reg.eth_proto_admin = proto_admin;
  831. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
  832. &ptys_reg);
  833. if (ret) {
  834. en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
  835. be32_to_cpu(ptys_reg.eth_proto_admin), ret);
  836. return ret;
  837. }
  838. mutex_lock(&priv->mdev->state_lock);
  839. if (priv->port_up) {
  840. en_warn(priv, "Port link mode changed, restarting port...\n");
  841. mlx4_en_stop_port(dev, 1);
  842. if (mlx4_en_start_port(dev))
  843. en_err(priv, "Failed restarting port %d\n", priv->port);
  844. }
  845. mutex_unlock(&priv->mdev->state_lock);
  846. return 0;
  847. }
  848. static int mlx4_en_get_coalesce(struct net_device *dev,
  849. struct ethtool_coalesce *coal)
  850. {
  851. struct mlx4_en_priv *priv = netdev_priv(dev);
  852. coal->tx_coalesce_usecs = priv->tx_usecs;
  853. coal->tx_max_coalesced_frames = priv->tx_frames;
  854. coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
  855. coal->rx_coalesce_usecs = priv->rx_usecs;
  856. coal->rx_max_coalesced_frames = priv->rx_frames;
  857. coal->pkt_rate_low = priv->pkt_rate_low;
  858. coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
  859. coal->pkt_rate_high = priv->pkt_rate_high;
  860. coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
  861. coal->rate_sample_interval = priv->sample_interval;
  862. coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
  863. return 0;
  864. }
  865. static int mlx4_en_set_coalesce(struct net_device *dev,
  866. struct ethtool_coalesce *coal)
  867. {
  868. struct mlx4_en_priv *priv = netdev_priv(dev);
  869. if (!coal->tx_max_coalesced_frames_irq)
  870. return -EINVAL;
  871. priv->rx_frames = (coal->rx_max_coalesced_frames ==
  872. MLX4_EN_AUTO_CONF) ?
  873. MLX4_EN_RX_COAL_TARGET :
  874. coal->rx_max_coalesced_frames;
  875. priv->rx_usecs = (coal->rx_coalesce_usecs ==
  876. MLX4_EN_AUTO_CONF) ?
  877. MLX4_EN_RX_COAL_TIME :
  878. coal->rx_coalesce_usecs;
  879. /* Setting TX coalescing parameters */
  880. if (coal->tx_coalesce_usecs != priv->tx_usecs ||
  881. coal->tx_max_coalesced_frames != priv->tx_frames) {
  882. priv->tx_usecs = coal->tx_coalesce_usecs;
  883. priv->tx_frames = coal->tx_max_coalesced_frames;
  884. }
  885. /* Set adaptive coalescing params */
  886. priv->pkt_rate_low = coal->pkt_rate_low;
  887. priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
  888. priv->pkt_rate_high = coal->pkt_rate_high;
  889. priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
  890. priv->sample_interval = coal->rate_sample_interval;
  891. priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
  892. priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
  893. return mlx4_en_moderation_update(priv);
  894. }
/* ethtool set_pauseparam: program global flow-control (pause) settings.
 * Pause autonegotiation is not supported.  On success the PFC stats
 * bitmap is refreshed to match the new configuration.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
                                  struct ethtool_pauseparam *pause)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        if (pause->autoneg)
                return -EINVAL;

        /* Update the profile first; it is what the SET_PORT call reads */
        priv->prof->tx_pause = pause->tx_pause != 0;
        priv->prof->rx_pause = pause->rx_pause != 0;
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    priv->prof->tx_pause,
                                    priv->prof->tx_ppp,
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err)
                en_err(priv, "Failed setting pause params\n");
        else
                mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
                                                priv->prof->rx_ppp,
                                                priv->prof->rx_pause,
                                                priv->prof->tx_ppp,
                                                priv->prof->tx_pause);

        return err;
}
  921. static void mlx4_en_get_pauseparam(struct net_device *dev,
  922. struct ethtool_pauseparam *pause)
  923. {
  924. struct mlx4_en_priv *priv = netdev_priv(dev);
  925. pause->tx_pause = priv->prof->tx_pause;
  926. pause->rx_pause = priv->prof->rx_pause;
  927. }
/* ethtool set_ringparam: resize the RX/TX descriptor rings.  Sizes are
 * rounded up to a power of two and clamped to device limits.  New
 * resources are allocated in a scratch priv before the live ones are
 * swapped, and the port is restarted around the swap when it was up.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *param)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_port_profile new_prof;
        struct mlx4_en_priv *tmp;
        u32 rx_size, tx_size;
        int port_up = 0;
        int err = 0;

        /* Jumbo and mini RX rings are not supported */
        if (param->rx_jumbo_pending || param->rx_mini_pending)
                return -EINVAL;

        rx_size = roundup_pow_of_two(param->rx_pending);
        rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
        rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
        tx_size = roundup_pow_of_two(param->tx_pending);
        tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
        tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

        /* Nothing to do when the effective sizes are unchanged */
        if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
                                        priv->rx_ring[0]->size) &&
            tx_size == priv->tx_ring[TX][0]->size)
                return 0;

        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&mdev->state_lock);
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.tx_ring_size = tx_size;
        new_prof.rx_ring_size = rx_size;

        /* Allocate replacement rings before stopping the port so a
         * failure leaves the device untouched.
         */
        err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;

        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }

        mlx4_en_safe_replace_resources(priv, tmp);

        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
                        en_err(priv, "Failed starting port\n");
        }

        err = mlx4_en_moderation_update(priv);
out:
        kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
}
  976. static void mlx4_en_get_ringparam(struct net_device *dev,
  977. struct ethtool_ringparam *param)
  978. {
  979. struct mlx4_en_priv *priv = netdev_priv(dev);
  980. memset(param, 0, sizeof(*param));
  981. param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
  982. param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
  983. param->rx_pending = priv->port_up ?
  984. priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
  985. param->tx_pending = priv->tx_ring[TX][0]->size;
  986. }
  987. static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
  988. {
  989. struct mlx4_en_priv *priv = netdev_priv(dev);
  990. return rounddown_pow_of_two(priv->rx_ring_num);
  991. }
/* Size in bytes of the RSS hash key exposed via ethtool. */
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
        return MLX4_EN_RSS_KEY_SIZE;
}
  996. static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
  997. {
  998. struct mlx4_en_priv *priv = netdev_priv(dev);
  999. /* check if requested function is supported by the device */
  1000. if (hfunc == ETH_RSS_HASH_TOP) {
  1001. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
  1002. return -EINVAL;
  1003. if (!(dev->features & NETIF_F_RXHASH))
  1004. en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
  1005. return 0;
  1006. } else if (hfunc == ETH_RSS_HASH_XOR) {
  1007. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
  1008. return -EINVAL;
  1009. if (dev->features & NETIF_F_RXHASH)
  1010. en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
  1011. return 0;
  1012. }
  1013. return -EINVAL;
  1014. }
  1015. static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
  1016. u8 *hfunc)
  1017. {
  1018. struct mlx4_en_priv *priv = netdev_priv(dev);
  1019. u32 n = mlx4_en_get_rxfh_indir_size(dev);
  1020. u32 i, rss_rings;
  1021. int err = 0;
  1022. rss_rings = priv->prof->rss_rings ?: n;
  1023. rss_rings = rounddown_pow_of_two(rss_rings);
  1024. for (i = 0; i < n; i++) {
  1025. if (!ring_index)
  1026. break;
  1027. ring_index[i] = i % rss_rings;
  1028. }
  1029. if (key)
  1030. memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
  1031. if (hfunc)
  1032. *hfunc = priv->rss_hash_fn;
  1033. return err;
  1034. }
/* ethtool set_rxfh: update the RSS indirection table, hash key and/or
 * hash function.  The table must spread flows evenly
 * (ring_index[i] == i % rss_rings) and the ring count must be a power
 * of two; the port is restarted to apply the change.
 */
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
                            const u8 *key, const u8 hfunc)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u32 n = mlx4_en_get_rxfh_indir_size(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int port_up = 0;
        int err = 0;
        int i;
        int rss_rings = 0;

        /* Calculate RSS table size and make sure flows are spread evenly
         * between rings
         */
        for (i = 0; i < n; i++) {
                if (!ring_index)
                        break;
                /* first wrap back to ring 0 marks the ring count */
                if (i > 0 && !ring_index[i] && !rss_rings)
                        rss_rings = i;
                if (ring_index[i] != (i % (rss_rings ?: n)))
                        return -EINVAL;
        }

        if (!rss_rings)
                rss_rings = n;

        /* RSS table size must be an order of 2 */
        if (!is_power_of_2(rss_rings))
                return -EINVAL;

        if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
                err = mlx4_en_check_rxfh_func(dev, hfunc);
                if (err)
                        return err;
        }

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }

        /* Only update the pieces the caller actually supplied */
        if (ring_index)
                priv->prof->rss_rings = rss_rings;
        if (key)
                memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
        if (hfunc != ETH_RSS_HASH_NO_CHANGE)
                priv->rss_hash_fn = hfunc;

        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
                        en_err(priv, "Failed starting port\n");
        }

        mutex_unlock(&mdev->state_lock);
        return err;
}
/* True when every bit of @field is 0 or every bit is 1 - the only mask
 * granularity the steering hardware supports.
 */
#define all_zeros_or_all_ones(field)            \
        ((field) == 0 || (field) == (__force typeof(field))-1)

/* Validate an ethtool flow-steering rule against what mlx4 steering can
 * express: exact-match (all-ones) or don't-care (all-zeros) masks only,
 * a limited set of flow types, and optional VLAN / dest-MAC extensions.
 * Returns 0 when representable, -EINVAL otherwise.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
                                 struct ethtool_rxnfc *cmd)
{
        struct ethtool_usrip4_spec *l3_mask;
        struct ethtool_tcpip4_spec *l4_mask;
        struct ethhdr *eth_mask;

        if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
                return -EINVAL;

        if (cmd->fs.flow_type & FLOW_MAC_EXT) {
                /* dest mac mask must be ff:ff:ff:ff:ff:ff */
                if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
                        return -EINVAL;
        }

        switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
                /* TOS matching is not supported */
                if (cmd->fs.m_u.tcp_ip4_spec.tos)
                        return -EINVAL;
                l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
                /* don't allow mask which isn't all 0 or 1 */
                if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
                    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
                    !all_zeros_or_all_ones(l4_mask->psrc) ||
                    !all_zeros_or_all_ones(l4_mask->pdst))
                        return -EINVAL;
                break;
        case IP_USER_FLOW:
                l3_mask = &cmd->fs.m_u.usr_ip4_spec;
                /* IPv4 only; at least one of src/dst must be matched */
                if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
                    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
                    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
                    !all_zeros_or_all_ones(l3_mask->ip4src) ||
                    !all_zeros_or_all_ones(l3_mask->ip4dst))
                        return -EINVAL;
                break;
        case ETHER_FLOW:
                eth_mask = &cmd->fs.m_u.ether_spec;
                /* source mac mask must not be set */
                if (!is_zero_ether_addr(eth_mask->h_source))
                        return -EINVAL;

                /* dest mac mask must be ff:ff:ff:ff:ff:ff */
                if (!is_broadcast_ether_addr(eth_mask->h_dest))
                        return -EINVAL;

                if (!all_zeros_or_all_ones(eth_mask->h_proto))
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if ((cmd->fs.flow_type & FLOW_EXT)) {
                /* VLAN etype can't be matched; the VID mask must be
                 * all-zeros or the full VLAN_VID_MASK.
                 */
                if (cmd->fs.m_ext.vlan_etype ||
                    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
                      0 ||
                      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
                      cpu_to_be16(VLAN_VID_MASK)))
                        return -EINVAL;

                if (cmd->fs.m_ext.vlan_tci) {
                        if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
                                return -EINVAL;
                }
        }

        return 0;
}
  1150. static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
  1151. struct list_head *rule_list_h,
  1152. struct mlx4_spec_list *spec_l2,
  1153. unsigned char *mac)
  1154. {
  1155. int err = 0;
  1156. __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
  1157. spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
  1158. memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
  1159. memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
  1160. if ((cmd->fs.flow_type & FLOW_EXT) &&
  1161. (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
  1162. spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
  1163. spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
  1164. }
  1165. list_add_tail(&spec_l2->list, rule_list_h);
  1166. return err;
  1167. }
/* Build the L2 (MAC) part of a steering rule for an IPv4 flow.  For
 * multicast destinations the MAC is derived from the IPv4 address
 * (ip_eth_mc_map); otherwise the user-supplied or device MAC is used.
 * Without CONFIG_INET the multicast mapping is unavailable, so fail.
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
                                                struct ethtool_rxnfc *cmd,
                                                struct list_head *rule_list_h,
                                                struct mlx4_spec_list *spec_l2,
                                                __be32 ipv4_dst)
{
#ifdef CONFIG_INET
        unsigned char mac[ETH_ALEN];

        if (!ipv4_is_multicast(ipv4_dst)) {
                if (cmd->fs.flow_type & FLOW_MAC_EXT)
                        memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
                else
                        memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
        } else {
                ip_eth_mc_map(ipv4_dst, mac);
        }

        return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
        return -EINVAL;
#endif
}
/* Build the steering spec list (L2 + L3) for an IP_USER_FLOW rule.  On
 * success the specs are linked into @list_h (the caller frees the list
 * after attaching); on failure everything allocated here is freed.
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
                       struct ethtool_rxnfc *cmd,
                       struct list_head *list_h)
{
        int err;
        struct mlx4_spec_list *spec_l2 = NULL;
        struct mlx4_spec_list *spec_l3 = NULL;
        struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

        spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
        spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
        if (!spec_l2 || !spec_l3) {
                err = -ENOMEM;
                goto free_spec;
        }

        /* L2 spec: destination MAC derived from the IPv4 destination */
        err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
                                                   cmd->fs.h_u.
                                                   usr_ip4_spec.ip4dst);
        if (err)
                goto free_spec;

        /* L3 spec: exact-match masks only where the user set a mask */
        spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
        spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
        if (l3_mask->ip4src)
                spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
        spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
        if (l3_mask->ip4dst)
                spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
        list_add_tail(&spec_l3->list, list_h);

        return 0;

free_spec:
        /* spec_l2 is only linked on the helper's success path, so it is
         * safe to free both here.
         */
        kfree(spec_l2);
        kfree(spec_l3);
        return err;
}
/* Build the steering spec list (L2 + L3 + L4) for a TCP_V4_FLOW or
 * UDP_V4_FLOW rule; @proto selects the union member and L4 rule id.
 * On success the specs are linked into @list_h; on failure everything
 * allocated here is freed.
 */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
                            struct ethtool_rxnfc *cmd,
                            struct list_head *list_h, int proto)
{
        int err;
        struct mlx4_spec_list *spec_l2 = NULL;
        struct mlx4_spec_list *spec_l3 = NULL;
        struct mlx4_spec_list *spec_l4 = NULL;
        /* tcp_ip4_spec and udp_ip4_spec share the same layout */
        struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

        spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
        spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
        spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
        if (!spec_l2 || !spec_l3 || !spec_l4) {
                err = -ENOMEM;
                goto free_spec;
        }

        spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
        if (proto == TCP_V4_FLOW) {
                err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
                                                           spec_l2,
                                                           cmd->fs.h_u.
                                                           tcp_ip4_spec.ip4dst);
                if (err)
                        goto free_spec;
                spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
                spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
                spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
                spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
                spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
        } else {
                err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
                                                           spec_l2,
                                                           cmd->fs.h_u.
                                                           udp_ip4_spec.ip4dst);
                if (err)
                        goto free_spec;
                spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
                spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
                spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
                spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
                spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
        }

        /* Exact-match masks only where the user supplied a mask */
        if (l4_mask->ip4src)
                spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
        if (l4_mask->ip4dst)
                spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
        if (l4_mask->psrc)
                spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
        if (l4_mask->pdst)
                spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

        list_add_tail(&spec_l3->list, list_h);
        list_add_tail(&spec_l4->list, list_h);

        return 0;

free_spec:
        kfree(spec_l2);
        kfree(spec_l3);
        kfree(spec_l4);
        return err;
}
/* Translate a validated ethtool flow spec into a list of mlx4
 * net-trans-rule specs on @rule_list_h.  Returns 0 on success or a
 * negative errno from validation or allocation.
 */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
                                             struct ethtool_rxnfc *cmd,
                                             struct list_head *rule_list_h)
{
        int err;
        struct ethhdr *eth_spec;
        struct mlx4_spec_list *spec_l2;
        struct mlx4_en_priv *priv = netdev_priv(dev);

        err = mlx4_en_validate_flow(dev, cmd);
        if (err)
                return err;

        switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
        case ETHER_FLOW:
                spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
                if (!spec_l2)
                        return -ENOMEM;

                eth_spec = &cmd->fs.h_u.ether_spec;
                /* links spec_l2 into the list; return value is ignored
                 * because the helper cannot fail here
                 */
                mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
                                             &eth_spec->h_dest[0]);
                spec_l2->eth.ether_type = eth_spec->h_proto;
                if (eth_spec->h_proto)
                        spec_l2->eth.ether_type_enable = 1;
                break;
        case IP_USER_FLOW:
                err = add_ip_rule(priv, cmd, rule_list_h);
                break;
        case TCP_V4_FLOW:
                err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
                break;
        case UDP_V4_FLOW:
                err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
                break;
        }

        return err;
}
/* ETHTOOL_SRXCLSRLINS: insert or replace the steering rule at
 * cmd->fs.location.  The ring_cookie selects the destination QP:
 * RX_CLS_FLOW_DISC drops the flow, EN_ETHTOOL_QP_ATTACH allows a raw
 * QP number, otherwise it is an RX ring index.  An existing rule at
 * the same location is detached first.
 */
static int mlx4_en_flow_replace(struct net_device *dev,
                                struct ethtool_rxnfc *cmd)
{
        int err;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct ethtool_flow_id *loc_rule;
        struct mlx4_spec_list *spec, *tmp_spec;
        u32 qpn;
        u64 reg_id;

        struct mlx4_net_trans_rule rule = {
                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                .exclusive = 0,
                .allow_loopback = 1,
                .promisc_mode = MLX4_FS_REGULAR,
        };

        rule.port = priv->port;
        rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
        INIT_LIST_HEAD(&rule.list);

        /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
        if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
                qpn = priv->drop_qp.qpn;
        else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
                        en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
                qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
                if (!qpn) {
                        en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
        }

        rule.qpn = qpn;
        err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
        if (err)
                goto out_free_list;

        /* Replace semantics: detach any rule already at this location */
        loc_rule = &priv->ethtool_rules[cmd->fs.location];
        if (loc_rule->id) {
                err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
                if (err) {
                        en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
                               cmd->fs.location, loc_rule->id);
                        goto out_free_list;
                }
                loc_rule->id = 0;
                memset(&loc_rule->flow_spec, 0,
                       sizeof(struct ethtool_rx_flow_spec));
                list_del(&loc_rule->list);
        }
        err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
        if (err) {
                en_err(priv, "Fail to attach network rule at location %d\n",
                       cmd->fs.location);
                goto out_free_list;
        }
        /* Remember the rule so it can be reported and detached later */
        loc_rule->id = reg_id;
        memcpy(&loc_rule->flow_spec, &cmd->fs,
               sizeof(struct ethtool_rx_flow_spec));
        list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
        /* The spec list is only needed for the attach call itself */
        list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
                list_del(&spec->list);
                kfree(spec);
        }
        return err;
}
  1386. static int mlx4_en_flow_detach(struct net_device *dev,
  1387. struct ethtool_rxnfc *cmd)
  1388. {
  1389. int err = 0;
  1390. struct ethtool_flow_id *rule;
  1391. struct mlx4_en_priv *priv = netdev_priv(dev);
  1392. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1393. return -EINVAL;
  1394. rule = &priv->ethtool_rules[cmd->fs.location];
  1395. if (!rule->id) {
  1396. err = -ENOENT;
  1397. goto out;
  1398. }
  1399. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1400. if (err) {
  1401. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1402. cmd->fs.location, rule->id);
  1403. goto out;
  1404. }
  1405. rule->id = 0;
  1406. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1407. list_del(&rule->list);
  1408. out:
  1409. return err;
  1410. }
  1411. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1412. int loc)
  1413. {
  1414. int err = 0;
  1415. struct ethtool_flow_id *rule;
  1416. struct mlx4_en_priv *priv = netdev_priv(dev);
  1417. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1418. return -EINVAL;
  1419. rule = &priv->ethtool_rules[loc];
  1420. if (rule->id)
  1421. memcpy(&cmd->fs, &rule->flow_spec,
  1422. sizeof(struct ethtool_rx_flow_spec));
  1423. else
  1424. err = -ENOENT;
  1425. return err;
  1426. }
  1427. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1428. {
  1429. int i, res = 0;
  1430. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1431. if (priv->ethtool_rules[i].id)
  1432. res++;
  1433. }
  1434. return res;
  1435. }
/* ethtool get_rxnfc: report the RX ring count and query flow-steering
 * rules.  Rule queries require device-managed steering and an up port.
 */
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                             u32 *rule_locs)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;
        int i = 0, priority = 0;

        if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
             cmd->cmd == ETHTOOL_GRXCLSRULE ||
             cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
            (mdev->dev->caps.steering_mode !=
             MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
                return -EINVAL;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = priv->rx_ring_num;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = mlx4_en_get_num_flows(priv);
                break;
        case ETHTOOL_GRXCLSRULE:
                err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
                break;
        case ETHTOOL_GRXCLSRLALL:
                /* Walk the locations, collecting populated slots until
                 * rule_cnt entries are returned; -ENOENT marks an empty
                 * slot and is not an error here.
                 */
                while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
                        err = mlx4_en_get_flow(dev, cmd, i);
                        if (!err)
                                rule_locs[priority++] = i;
                        i++;
                }
                err = 0;
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}
  1474. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1475. {
  1476. int err = 0;
  1477. struct mlx4_en_priv *priv = netdev_priv(dev);
  1478. struct mlx4_en_dev *mdev = priv->mdev;
  1479. if (mdev->dev->caps.steering_mode !=
  1480. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1481. return -EINVAL;
  1482. switch (cmd->cmd) {
  1483. case ETHTOOL_SRXCLSRLINS:
  1484. err = mlx4_en_flow_replace(dev, cmd);
  1485. break;
  1486. case ETHTOOL_SRXCLSRLDEL:
  1487. err = mlx4_en_flow_detach(dev, cmd);
  1488. break;
  1489. default:
  1490. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1491. return -EINVAL;
  1492. }
  1493. return err;
  1494. }
  1495. static void mlx4_en_get_channels(struct net_device *dev,
  1496. struct ethtool_channels *channel)
  1497. {
  1498. struct mlx4_en_priv *priv = netdev_priv(dev);
  1499. channel->max_rx = MAX_RX_RINGS;
  1500. channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
  1501. channel->rx_count = priv->rx_ring_num;
  1502. channel->tx_count = priv->tx_ring_num[TX] /
  1503. priv->prof->num_up;
  1504. }
/* ethtool set_channels callback: resize the RX/TX ring sets.
 * New resources are staged in a scratch priv and swapped in only after
 * they were successfully allocated, so a failure leaves the device in
 * its previous working configuration.
 */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;	/* scratch priv for staged allocation */
	int port_up = 0;
	int xdp_count;
	int err = 0;
	u8 up;

	if (!channel->tx_count || !channel->rx_count)
		return -EINVAL;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	/* When XDP is active, one extra TX ring per RX ring is needed */
	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
	if (channel->tx_count * priv->prof->num_up + xdp_count >
	    MAX_TX_RINGS) {
		err = -EINVAL;
		en_err(priv,
		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
		       channel->tx_count * priv->prof->num_up + xdp_count,
		       MAX_TX_RINGS);
		goto out;
	}

	/* Build the requested profile and try to allocate rings for it */
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_tx_rings_p_up = channel->tx_count;
	new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
	new_prof.tx_ring_num[TX_XDP] = xdp_count;
	new_prof.rx_ring_num = channel->rx_count;

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	/* Quiesce the port before swapping in the new resources */
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	/* Re-apply the traffic-class mapping for the new ring layout */
	up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
	     0 : priv->prof->num_up;
	mlx4_en_setup_tc(dev, up);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}
  1562. static int mlx4_en_get_ts_info(struct net_device *dev,
  1563. struct ethtool_ts_info *info)
  1564. {
  1565. struct mlx4_en_priv *priv = netdev_priv(dev);
  1566. struct mlx4_en_dev *mdev = priv->mdev;
  1567. int ret;
  1568. ret = ethtool_op_get_ts_info(dev, info);
  1569. if (ret)
  1570. return ret;
  1571. if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
  1572. info->so_timestamping |=
  1573. SOF_TIMESTAMPING_TX_HARDWARE |
  1574. SOF_TIMESTAMPING_RX_HARDWARE |
  1575. SOF_TIMESTAMPING_RAW_HARDWARE;
  1576. info->tx_types =
  1577. (1 << HWTSTAMP_TX_OFF) |
  1578. (1 << HWTSTAMP_TX_ON);
  1579. info->rx_filters =
  1580. (1 << HWTSTAMP_FILTER_NONE) |
  1581. (1 << HWTSTAMP_FILTER_ALL);
  1582. if (mdev->ptp_clock)
  1583. info->phc_index = ptp_clock_index(mdev->ptp_clock);
  1584. }
  1585. return ret;
  1586. }
  1587. static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
  1588. {
  1589. struct mlx4_en_priv *priv = netdev_priv(dev);
  1590. struct mlx4_en_dev *mdev = priv->mdev;
  1591. bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1592. bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1593. bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
  1594. bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
  1595. int i;
  1596. int ret = 0;
  1597. if (bf_enabled_new != bf_enabled_old) {
  1598. int t;
  1599. if (bf_enabled_new) {
  1600. bool bf_supported = true;
  1601. for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
  1602. for (i = 0; i < priv->tx_ring_num[t]; i++)
  1603. bf_supported &=
  1604. priv->tx_ring[t][i]->bf_alloced;
  1605. if (!bf_supported) {
  1606. en_err(priv, "BlueFlame is not supported\n");
  1607. return -EINVAL;
  1608. }
  1609. priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1610. } else {
  1611. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1612. }
  1613. for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
  1614. for (i = 0; i < priv->tx_ring_num[t]; i++)
  1615. priv->tx_ring[t][i]->bf_enabled =
  1616. bf_enabled_new;
  1617. en_info(priv, "BlueFlame %s\n",
  1618. bf_enabled_new ? "Enabled" : "Disabled");
  1619. }
  1620. if (phv_enabled_new != phv_enabled_old) {
  1621. ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
  1622. if (ret)
  1623. return ret;
  1624. else if (phv_enabled_new)
  1625. priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
  1626. else
  1627. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
  1628. en_info(priv, "PHV bit %s\n",
  1629. phv_enabled_new ? "Enabled" : "Disabled");
  1630. }
  1631. return 0;
  1632. }
  1633. static u32 mlx4_en_get_priv_flags(struct net_device *dev)
  1634. {
  1635. struct mlx4_en_priv *priv = netdev_priv(dev);
  1636. return priv->pflags;
  1637. }
  1638. static int mlx4_en_get_tunable(struct net_device *dev,
  1639. const struct ethtool_tunable *tuna,
  1640. void *data)
  1641. {
  1642. const struct mlx4_en_priv *priv = netdev_priv(dev);
  1643. int ret = 0;
  1644. switch (tuna->id) {
  1645. case ETHTOOL_TX_COPYBREAK:
  1646. *(u32 *)data = priv->prof->inline_thold;
  1647. break;
  1648. default:
  1649. ret = -EINVAL;
  1650. break;
  1651. }
  1652. return ret;
  1653. }
  1654. static int mlx4_en_set_tunable(struct net_device *dev,
  1655. const struct ethtool_tunable *tuna,
  1656. const void *data)
  1657. {
  1658. struct mlx4_en_priv *priv = netdev_priv(dev);
  1659. int val, ret = 0;
  1660. switch (tuna->id) {
  1661. case ETHTOOL_TX_COPYBREAK:
  1662. val = *(u32 *)data;
  1663. if (val < MIN_PKT_LEN || val > MAX_INLINE)
  1664. ret = -EINVAL;
  1665. else
  1666. priv->prof->inline_thold = val;
  1667. break;
  1668. default:
  1669. ret = -EINVAL;
  1670. break;
  1671. }
  1672. return ret;
  1673. }
  1674. static int mlx4_en_get_module_info(struct net_device *dev,
  1675. struct ethtool_modinfo *modinfo)
  1676. {
  1677. struct mlx4_en_priv *priv = netdev_priv(dev);
  1678. struct mlx4_en_dev *mdev = priv->mdev;
  1679. int ret;
  1680. u8 data[4];
  1681. /* Read first 2 bytes to get Module & REV ID */
  1682. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1683. 0/*offset*/, 2/*size*/, data);
  1684. if (ret < 2)
  1685. return -EIO;
  1686. switch (data[0] /* identifier */) {
  1687. case MLX4_MODULE_ID_QSFP:
  1688. modinfo->type = ETH_MODULE_SFF_8436;
  1689. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1690. break;
  1691. case MLX4_MODULE_ID_QSFP_PLUS:
  1692. if (data[1] >= 0x3) { /* revision id */
  1693. modinfo->type = ETH_MODULE_SFF_8636;
  1694. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1695. } else {
  1696. modinfo->type = ETH_MODULE_SFF_8436;
  1697. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1698. }
  1699. break;
  1700. case MLX4_MODULE_ID_QSFP28:
  1701. modinfo->type = ETH_MODULE_SFF_8636;
  1702. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1703. break;
  1704. case MLX4_MODULE_ID_SFP:
  1705. modinfo->type = ETH_MODULE_SFF_8472;
  1706. modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
  1707. break;
  1708. default:
  1709. return -EINVAL;
  1710. }
  1711. return 0;
  1712. }
/* ethtool get_module_eeprom callback: read ee->len bytes of cable
 * module EEPROM, starting at ee->offset, in as many firmware reads
 * as needed (each read may return fewer bytes than requested).
 */
static int mlx4_en_get_module_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	/* Pre-zero the output so unread bytes are deterministic */
	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		       i, offset, ee->len - i);

		/* Returns the number of bytes read, 0 when done, <0 on error */
		ret = mlx4_get_module_info(mdev->dev, priv->port,
					   offset, ee->len - i, data + i);

		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			/* NOTE(review): failure is deliberately reported as
			 * success, leaving the remainder of the (pre-zeroed)
			 * buffer to user space — presumably to keep ethtool
			 * dumps usable on partially readable modules; confirm
			 * before changing.
			 */
			return 0;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}
  1743. static int mlx4_en_set_phys_id(struct net_device *dev,
  1744. enum ethtool_phys_id_state state)
  1745. {
  1746. int err;
  1747. u16 beacon_duration;
  1748. struct mlx4_en_priv *priv = netdev_priv(dev);
  1749. struct mlx4_en_dev *mdev = priv->mdev;
  1750. if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
  1751. return -EOPNOTSUPP;
  1752. switch (state) {
  1753. case ETHTOOL_ID_ACTIVE:
  1754. beacon_duration = PORT_BEACON_MAX_LIMIT;
  1755. break;
  1756. case ETHTOOL_ID_INACTIVE:
  1757. beacon_duration = 0;
  1758. break;
  1759. default:
  1760. return -EOPNOTSUPP;
  1761. }
  1762. err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
  1763. return err;
  1764. }
/* ethtool operations table registered for mlx4_en netdevs */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_link_ksettings = mlx4_en_get_link_ksettings,
	.set_link_ksettings = mlx4_en_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};