en_ethtool.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/ethtool.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/mlx4/driver.h>
  37. #include <linux/mlx4/device.h>
  38. #include <linux/in.h>
  39. #include <net/ip.h>
  40. #include <linux/bitmap.h>
  41. #include "mlx4_en.h"
  42. #include "en_port.h"
  43. #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
  44. #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
  45. #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
  46. static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
  47. {
  48. int i, t;
  49. int err = 0;
  50. for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
  51. for (i = 0; i < priv->tx_ring_num[t]; i++) {
  52. priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
  53. priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
  54. if (priv->port_up) {
  55. err = mlx4_en_set_cq_moder(priv,
  56. priv->tx_cq[t][i]);
  57. if (err)
  58. return err;
  59. }
  60. }
  61. }
  62. if (priv->adaptive_rx_coal)
  63. return 0;
  64. for (i = 0; i < priv->rx_ring_num; i++) {
  65. priv->rx_cq[i]->moder_cnt = priv->rx_frames;
  66. priv->rx_cq[i]->moder_time = priv->rx_usecs;
  67. priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
  68. if (priv->port_up) {
  69. err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
  70. if (err)
  71. return err;
  72. }
  73. }
  74. return err;
  75. }
  76. static void
  77. mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
  78. {
  79. struct mlx4_en_priv *priv = netdev_priv(dev);
  80. struct mlx4_en_dev *mdev = priv->mdev;
  81. strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
  82. strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
  83. sizeof(drvinfo->version));
  84. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  85. "%d.%d.%d",
  86. (u16) (mdev->dev->caps.fw_ver >> 32),
  87. (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
  88. (u16) (mdev->dev->caps.fw_ver & 0xffff));
  89. strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
  90. sizeof(drvinfo->bus_info));
  91. }
/* Names for the ETH_SS_PRIV_FLAGS string set.  NOTE(review): the order is
 * presumably tied to the driver's private-flag bit numbering — confirm
 * against the MLX4_EN_PRIV_FLAGS_* definitions before reordering.
 */
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
	"phv-bit"
};
/* Names for the ETH_SS_STATS string set.  The groups mirror the counter
 * structures read back in mlx4_en_get_ethtool_stats() (dev->stats,
 * port_stats, pf_stats, flow-control stats, pkstats, xdp_stats); the order
 * within each group must match the corresponding struct layout — do not
 * reorder entries.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	/* main statistics */
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",

	/* pf statistics */
	"pf_rx_packets",
	"pf_rx_bytes",
	"pf_tx_packets",
	"pf_tx_bytes",

	/* priority flow control statistics rx */
	"rx_pause_prio_0", "rx_pause_duration_prio_0",
	"rx_pause_transition_prio_0",
	"rx_pause_prio_1", "rx_pause_duration_prio_1",
	"rx_pause_transition_prio_1",
	"rx_pause_prio_2", "rx_pause_duration_prio_2",
	"rx_pause_transition_prio_2",
	"rx_pause_prio_3", "rx_pause_duration_prio_3",
	"rx_pause_transition_prio_3",
	"rx_pause_prio_4", "rx_pause_duration_prio_4",
	"rx_pause_transition_prio_4",
	"rx_pause_prio_5", "rx_pause_duration_prio_5",
	"rx_pause_transition_prio_5",
	"rx_pause_prio_6", "rx_pause_duration_prio_6",
	"rx_pause_transition_prio_6",
	"rx_pause_prio_7", "rx_pause_duration_prio_7",
	"rx_pause_transition_prio_7",

	/* flow control statistics rx */
	"rx_pause", "rx_pause_duration", "rx_pause_transition",

	/* priority flow control statistics tx */
	"tx_pause_prio_0", "tx_pause_duration_prio_0",
	"tx_pause_transition_prio_0",
	"tx_pause_prio_1", "tx_pause_duration_prio_1",
	"tx_pause_transition_prio_1",
	"tx_pause_prio_2", "tx_pause_duration_prio_2",
	"tx_pause_transition_prio_2",
	"tx_pause_prio_3", "tx_pause_duration_prio_3",
	"tx_pause_transition_prio_3",
	"tx_pause_prio_4", "tx_pause_duration_prio_4",
	"tx_pause_transition_prio_4",
	"tx_pause_prio_5", "tx_pause_duration_prio_5",
	"tx_pause_transition_prio_5",
	"tx_pause_prio_6", "tx_pause_duration_prio_6",
	"tx_pause_transition_prio_6",
	"tx_pause_prio_7", "tx_pause_duration_prio_7",
	"tx_pause_transition_prio_7",

	/* flow control statistics tx */
	"tx_pause", "tx_pause_duration", "tx_pause_transition",

	/* packet statistics */
	"rx_multicast_packets",
	"rx_broadcast_packets",
	"rx_jabbers",
	"rx_in_range_length_error",
	"rx_out_range_length_error",
	"tx_multicast_packets",
	"tx_broadcast_packets",
	"rx_prio_0_packets", "rx_prio_0_bytes",
	"rx_prio_1_packets", "rx_prio_1_bytes",
	"rx_prio_2_packets", "rx_prio_2_bytes",
	"rx_prio_3_packets", "rx_prio_3_bytes",
	"rx_prio_4_packets", "rx_prio_4_bytes",
	"rx_prio_5_packets", "rx_prio_5_bytes",
	"rx_prio_6_packets", "rx_prio_6_bytes",
	"rx_prio_7_packets", "rx_prio_7_bytes",
	"rx_novlan_packets", "rx_novlan_bytes",
	"tx_prio_0_packets", "tx_prio_0_bytes",
	"tx_prio_1_packets", "tx_prio_1_bytes",
	"tx_prio_2_packets", "tx_prio_2_bytes",
	"tx_prio_3_packets", "tx_prio_3_bytes",
	"tx_prio_4_packets", "tx_prio_4_bytes",
	"tx_prio_5_packets", "tx_prio_5_bytes",
	"tx_prio_6_packets", "tx_prio_6_bytes",
	"tx_prio_7_packets", "tx_prio_7_bytes",
	"tx_novlan_packets", "tx_novlan_bytes",

	/* xdp statistics */
	"rx_xdp_drop",
	"rx_xdp_tx",
	"rx_xdp_tx_full",
};
/* Names for the ETH_SS_TEST string set.  The last two entries are only
 * exposed when the device supports UC loopback (see mlx4_en_get_strings()
 * and mlx4_en_get_sset_count()).
 */
static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};
  190. static u32 mlx4_en_get_msglevel(struct net_device *dev)
  191. {
  192. return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
  193. }
  194. static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
  195. {
  196. ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
  197. }
  198. static void mlx4_en_get_wol(struct net_device *netdev,
  199. struct ethtool_wolinfo *wol)
  200. {
  201. struct mlx4_en_priv *priv = netdev_priv(netdev);
  202. int err = 0;
  203. u64 config = 0;
  204. u64 mask;
  205. if ((priv->port < 1) || (priv->port > 2)) {
  206. en_err(priv, "Failed to get WoL information\n");
  207. return;
  208. }
  209. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  210. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  211. if (!(priv->mdev->dev->caps.flags & mask)) {
  212. wol->supported = 0;
  213. wol->wolopts = 0;
  214. return;
  215. }
  216. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  217. if (err) {
  218. en_err(priv, "Failed to get WoL information\n");
  219. return;
  220. }
  221. if (config & MLX4_EN_WOL_MAGIC)
  222. wol->supported = WAKE_MAGIC;
  223. else
  224. wol->supported = 0;
  225. if (config & MLX4_EN_WOL_ENABLED)
  226. wol->wolopts = WAKE_MAGIC;
  227. else
  228. wol->wolopts = 0;
  229. }
  230. static int mlx4_en_set_wol(struct net_device *netdev,
  231. struct ethtool_wolinfo *wol)
  232. {
  233. struct mlx4_en_priv *priv = netdev_priv(netdev);
  234. u64 config = 0;
  235. int err = 0;
  236. u64 mask;
  237. if ((priv->port < 1) || (priv->port > 2))
  238. return -EOPNOTSUPP;
  239. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  240. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  241. if (!(priv->mdev->dev->caps.flags & mask))
  242. return -EOPNOTSUPP;
  243. if (wol->supported & ~WAKE_MAGIC)
  244. return -EINVAL;
  245. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  246. if (err) {
  247. en_err(priv, "Failed to get WoL info, unable to modify\n");
  248. return err;
  249. }
  250. if (wol->wolopts & WAKE_MAGIC) {
  251. config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
  252. MLX4_EN_WOL_MAGIC;
  253. } else {
  254. config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
  255. config |= MLX4_EN_WOL_DO_MODIFY;
  256. }
  257. err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
  258. if (err)
  259. en_err(priv, "Failed to set WoL information\n");
  260. return err;
  261. }
/* Helper for walking a statistics bitmap.  When the bitmap is non-empty,
 * only set bits are reported and count is the bitmap weight; when it is
 * empty, every position tests as present and count is the full length
 * (see bitmap_iterator_init()/bitmap_iterator_test()).
 */
struct bitmap_iterator {
	unsigned long *stats_bitmap;	/* bitmap being walked (may be ignored) */
	unsigned int count;		/* number of stats that will be reported */
	unsigned int iterator;		/* current bit position */
	bool advance_array;		/* if clear, report every position as set */
};
  268. static inline void bitmap_iterator_init(struct bitmap_iterator *h,
  269. unsigned long *stats_bitmap,
  270. int count)
  271. {
  272. h->iterator = 0;
  273. h->advance_array = !bitmap_empty(stats_bitmap, count);
  274. h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
  275. : count;
  276. h->stats_bitmap = stats_bitmap;
  277. }
  278. static inline int bitmap_iterator_test(struct bitmap_iterator *h)
  279. {
  280. return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
  281. }
  282. static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
  283. {
  284. return h->iterator++;
  285. }
/* Number of statistics the iterator will report (bitmap weight, or the
 * full length when the bitmap was empty at init time).
 */
static inline unsigned int
bitmap_iterator_count(struct bitmap_iterator *h)
{
	return h->count;
}
  291. static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
  292. {
  293. struct mlx4_en_priv *priv = netdev_priv(dev);
  294. struct bitmap_iterator it;
  295. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  296. switch (sset) {
  297. case ETH_SS_STATS:
  298. return bitmap_iterator_count(&it) +
  299. (priv->tx_ring_num[TX] * 2) +
  300. (priv->rx_ring_num * (3 + NUM_XDP_STATS));
  301. case ETH_SS_TEST:
  302. return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
  303. & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
  304. case ETH_SS_PRIV_FLAGS:
  305. return ARRAY_SIZE(mlx4_en_priv_flags);
  306. default:
  307. return -EOPNOTSUPP;
  308. }
  309. }
/* ethtool -S: fill data[] with the counters selected by the stats bitmap.
 * The emission order here must match main_strings[] and the per-ring names
 * generated in mlx4_en_get_strings() exactly.  All counters are sampled
 * under stats_lock for a consistent snapshot.
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	spin_lock_bh(&priv->stats_lock);

	mlx4_en_fold_software_stats(dev);

	/* netdev main statistics (dev->stats) */
	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&dev->stats)[i];

	/* port statistics */
	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->port_stats)[i];

	/* PF statistics */
	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((unsigned long *)&priv->pf_stats)[i];

	/* RX per-priority and aggregate flow-control statistics */
	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->rx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->rx_flowstats)[i];

	/* TX per-priority and aggregate flow-control statistics */
	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->tx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->tx_flowstats)[i];

	/* packet statistics */
	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->pkstats)[i];

	/* XDP statistics */
	for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->xdp_stats)[i];

	/* per-ring counters are always reported, bitmap or not */
	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		data[index++] = priv->tx_ring[TX][i]->packets;
		data[index++] = priv->tx_ring[TX][i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
		data[index++] = priv->rx_ring[i]->dropped;
		data[index++] = priv->rx_ring[i]->xdp_drop;
		data[index++] = priv->rx_ring[i]->xdp_tx;
		data[index++] = priv->rx_ring[i]->xdp_tx_full;
	}
	spin_unlock_bh(&priv->stats_lock);
}
/* ethtool self-test entry point; delegates to the driver's selftest code */
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
/* ethtool string-set names.  For ETH_SS_STATS, 'strings' indexes
 * main_strings[] and must advance through every candidate stat while
 * 'index' only advances for stats selected by the bitmap — this keeps the
 * names aligned with the values emitted by mlx4_en_get_ethtool_stats().
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, strings = 0;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (stringset) {
	case ETH_SS_TEST:
		/* first tests are always present; the last two only with
		 * UC loopback support
		 */
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PORT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PF_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PKT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_XDP_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		/* per-ring names, always reported */
		for (i = 0; i < priv->tx_ring_num[TX]; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_dropped", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_xdp_drop", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_xdp_tx", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_xdp_tx_full", i);
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;
	}
}
  447. static u32 mlx4_en_autoneg_get(struct net_device *dev)
  448. {
  449. struct mlx4_en_priv *priv = netdev_priv(dev);
  450. struct mlx4_en_dev *mdev = priv->mdev;
  451. u32 autoneg = AUTONEG_DISABLE;
  452. if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
  453. (priv->port_state.flags & MLX4_EN_PORT_ANE))
  454. autoneg = AUTONEG_ENABLE;
  455. return autoneg;
  456. }
/* Derive the supported-port ethtool bit (TP / FIBRE / Backplane) from the
 * PTYS protocol capability mask.  The three groups are checked in priority
 * order and at most one port bit is set.
 */
static void ptys2ethtool_update_supported_port(unsigned long *mask,
					       struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		/* twisted-pair modes */
		__set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		/* fibre / direct-attach modes */
		__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		/* backplane (KR/KX) modes */
		__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
	}
}
/* Map the PTYS operational protocol mask to an ethtool PORT_* type.
 * Falls back to the capability mask when the link is down; groups are
 * checked in order (TP, fibre, DA, backplane) with PORT_OTHER as default.
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
		return PORT_DA;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		/* backplane modes have no dedicated PORT_* value */
		return PORT_NONE;
	}
	return PORT_OTHER;
}
  512. #define MLX4_LINK_MODES_SZ \
  513. (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
/* Selects which link-mode mask of a ptys2ethtool_config to operate on */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
};
/* Per-PTYS-protocol translation entry: the equivalent ethtool link-mode
 * masks and the link speed for that protocol.
 */
struct ptys2ethtool_config {
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
	u32 speed;
};
  523. static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
  524. enum ethtool_report report)
  525. {
  526. switch (report) {
  527. case SUPPORTED:
  528. return cfg->supported;
  529. case ADVERTISED:
  530. return cfg->advertised;
  531. }
  532. return NULL;
  533. }
/* Populate one ptys2ethtool_map[] entry: record the speed and set the given
 * ethtool link-mode bits in both the supported and advertised masks.
 * Implemented as a statement-expression macro so the variadic mode list can
 * be turned into a local array.
 */
#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)		\
	({								\
		struct ptys2ethtool_config *cfg;			\
		const unsigned int modes[] = { __VA_ARGS__ };		\
		unsigned int i;						\
		cfg = &ptys2ethtool_map[reg_];				\
		cfg->speed = speed_;					\
		bitmap_zero(cfg->supported,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		bitmap_zero(cfg->advertised,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {		\
			__set_bit(modes[i], cfg->supported);		\
			__set_bit(modes[i], cfg->advertised);		\
		}							\
	})

/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
  552. void __init mlx4_en_init_ptys2ethtool_map(void)
  553. {
  554. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
  555. ETHTOOL_LINK_MODE_100baseT_Full_BIT);
  556. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
  557. ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
  558. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
  559. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  560. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
  561. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  562. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
  563. ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
  564. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
  565. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  566. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
  567. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  568. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
  569. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  570. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
  571. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  572. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
  573. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  574. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
  575. ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
  576. ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
  577. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
  578. ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
  579. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
  580. ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
  581. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
  582. ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
  583. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
  584. ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
  585. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
  586. ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
  587. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
  588. ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
  589. };
  590. static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
  591. u32 eth_proto,
  592. enum ethtool_report report)
  593. {
  594. int i;
  595. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  596. if (eth_proto & MLX4_PROT_MASK(i))
  597. bitmap_or(link_modes, link_modes,
  598. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  599. report),
  600. __ETHTOOL_LINK_MODE_MASK_NBITS);
  601. }
  602. }
  603. static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
  604. enum ethtool_report report)
  605. {
  606. int i;
  607. u32 ptys_modes = 0;
  608. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  609. if (bitmap_intersects(
  610. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  611. report),
  612. link_modes,
  613. __ETHTOOL_LINK_MODE_MASK_NBITS))
  614. ptys_modes |= 1 << i;
  615. }
  616. return ptys_modes;
  617. }
  618. /* Convert actual speed (SPEED_XXX) to ptys link modes */
  619. static u32 speed2ptys_link_modes(u32 speed)
  620. {
  621. int i;
  622. u32 ptys_modes = 0;
  623. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  624. if (ptys2ethtool_map[i].speed == speed)
  625. ptys_modes |= 1 << i;
  626. }
  627. return ptys_modes;
  628. }
/* Build an ethtool link_ksettings report from the device PTYS register.
 *
 * Queries PTYS (port type and speed) for the Ethernet protocol masks and
 * translates the cap/admin/lp_adv bitmasks into the ethtool supported/
 * advertising/lp_advertising link-mode bitmaps.  Returns 0 on success or
 * the mlx4_ACCESS_PTYS_REG error code on query failure.
 */
static int
ethtool_get_ptys_link_ksettings(struct net_device *dev,
				struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	/* reset supported/advertising masks */
	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
					   &ptys_reg);

	/* capability mask -> supported link modes */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
				       eth_proto, SUPPORTED);
	/* admin mask -> advertised link modes */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
				       eth_proto, ADVERTISED);

	/* pause is always supported; advertise per the configured profile */
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     Pause);
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     Asym_Pause);

	if (priv->prof->tx_pause)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Pause);
	/* asymmetric pause == exactly one direction paused */
	if (priv->prof->tx_pause ^ priv->prof->rx_pause)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Asym_Pause);

	link_ksettings->base.port = ptys_get_active_port(&ptys_reg);

	if (mlx4_en_autoneg_get(dev)) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	/* NOTE(review): ANC presumably means "autoneg complete" — confirm */
	link_ksettings->base.autoneg
		= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* link-partner advertisement comes from the lp_adv mask */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
	ptys2ethtool_update_link_modes(
		link_ksettings->link_modes.lp_advertising,
		eth_proto, ADVERTISED);
	if (priv->port_state.flags & MLX4_EN_PORT_ANC)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     lp_advertising, Autoneg);

	/* no PHY/MDIO access; no twisted-pair MDI state to report */
	link_ksettings->base.phy_address = 0;
	link_ksettings->base.mdio_support = 0;
	link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret;
}
/* Fallback link report used when PTYS is unavailable or the query failed:
 * a fixed 10GbaseT full-duplex device with the port type guessed from the
 * transceiver code cached in port_state by QUERY_PORT.
 */
static void
ethtool_get_default_link_ksettings(
	struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	link_ksettings->base.autoneg = AUTONEG_DISABLE;
	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
					     10000baseT_Full);
	trans_type = priv->port_state.transceiver;
	/* NOTE(review): 0x1..0xC presumably optical module codes and
	 * 0x80/0 copper/unknown — confirm against the device PRM.
	 */
	if (trans_type > 0 && trans_type <= 0xC) {
		link_ksettings->base.port = PORT_FIBRE;
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, FIBRE);
	} else if (trans_type == 0x80 || trans_type == 0) {
		link_ksettings->base.port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, TP);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, TP);
	} else {
		/* unrecognized transceiver code */
		link_ksettings->base.port = -1;
	}
}
/* ethtool get_link_ksettings entry point.
 * Refreshes the cached port state via QUERY_PORT, reports PTYS-based
 * settings when the device supports ETH protocol control, and falls back
 * to the static default report otherwise.  Speed/duplex are only valid
 * while the carrier is up.
 */
static int
mlx4_en_get_link_ksettings(struct net_device *dev,
			   struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;

	/* NOTE(review): all QUERY_PORT failures are reported as -ENOMEM */
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);

	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
	if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
		ethtool_get_default_link_ksettings(dev, link_ksettings);

	if (netif_carrier_ok(dev)) {
		link_ksettings->base.speed = priv->port_state.link_speed;
		link_ksettings->base.duplex = DUPLEX_FULL;
	} else {
		link_ksettings->base.speed = SPEED_UNKNOWN;
		link_ksettings->base.duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}
  756. /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
  757. static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
  758. __be32 proto_cap)
  759. {
  760. __be32 proto_admin = 0;
  761. if (!speed) { /* Speed = 0 ==> Reset Link modes */
  762. proto_admin = proto_cap;
  763. en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
  764. be32_to_cpu(proto_cap));
  765. } else {
  766. u32 ptys_link_modes = speed2ptys_link_modes(speed);
  767. proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
  768. en_info(priv, "Setting Speed to %d\n", speed);
  769. }
  770. return proto_admin;
  771. }
/* ethtool -s: set speed / advertised modes / autoneg through PTYS.
 * Half duplex and devices without ETH protocol control are rejected.
 * With autoneg off the admin mask is derived from the requested speed;
 * with autoneg on it is derived from the advertised link modes.  If the
 * result differs from the current configuration it is written back and
 * the port is restarted so the change takes effect.
 */
static int
mlx4_en_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	__be32 proto_admin;
	u8 cur_autoneg;
	int ret;

	u32 ptys_adv = ethtool2ptys_link_modes(
		link_ksettings->link_modes.advertising, ADVERTISED);
	const int speed = link_ksettings->base.speed;

	en_dbg(DRV, priv,
	       "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
	       speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
	       link_ksettings->link_modes.advertising,
	       link_ksettings->base.autoneg,
	       link_ksettings->base.duplex);

	if (!(priv->mdev->dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
	    (link_ksettings->base.duplex == DUPLEX_HALF))
		return -EINVAL;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
			ret);
		/* NOTE(review): query failure is deliberately swallowed —
		 * the set request reports success without applying anything
		 */
		return 0;
	}

	cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;

	if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
		/* derive the admin mask from the requested speed */
		proto_admin = speed_set_ptys_admin(priv, speed,
						   ptys_reg.eth_proto_cap);
		/* AN disable is only requested for 1G SGMII/KX modes, and
		 * only when the device reports it can disable AN at all
		 */
		if ((be32_to_cpu(proto_admin) &
		     (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
		      MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
		    (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
			ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
	} else {
		/* derive the admin mask from the advertised link modes */
		proto_admin = cpu_to_be32(ptys_adv);
		ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
	}

	/* only modes the device actually supports may be requested */
	proto_admin &= ptys_reg.eth_proto_cap;
	if (!proto_admin) {
		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
		return -EINVAL; /* nothing to change due to bad input */
	}

	if ((proto_admin == ptys_reg.eth_proto_admin) &&
	    ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
	     (link_ksettings->base.autoneg == cur_autoneg)))
		return 0; /* Nothing to change */

	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
	       be32_to_cpu(proto_admin));

	ptys_reg.eth_proto_admin = proto_admin;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
				   &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
		return ret;
	}

	/* restart the port so the new link configuration is applied */
	mutex_lock(&priv->mdev->state_lock);
	if (priv->port_up) {
		en_warn(priv, "Port link mode changed, restarting port...\n");
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&priv->mdev->state_lock);
	return 0;
}
  847. static int mlx4_en_get_coalesce(struct net_device *dev,
  848. struct ethtool_coalesce *coal)
  849. {
  850. struct mlx4_en_priv *priv = netdev_priv(dev);
  851. coal->tx_coalesce_usecs = priv->tx_usecs;
  852. coal->tx_max_coalesced_frames = priv->tx_frames;
  853. coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
  854. coal->rx_coalesce_usecs = priv->rx_usecs;
  855. coal->rx_max_coalesced_frames = priv->rx_frames;
  856. coal->pkt_rate_low = priv->pkt_rate_low;
  857. coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
  858. coal->pkt_rate_high = priv->pkt_rate_high;
  859. coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
  860. coal->rate_sample_interval = priv->sample_interval;
  861. coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
  862. return 0;
  863. }
  864. static int mlx4_en_set_coalesce(struct net_device *dev,
  865. struct ethtool_coalesce *coal)
  866. {
  867. struct mlx4_en_priv *priv = netdev_priv(dev);
  868. if (!coal->tx_max_coalesced_frames_irq)
  869. return -EINVAL;
  870. priv->rx_frames = (coal->rx_max_coalesced_frames ==
  871. MLX4_EN_AUTO_CONF) ?
  872. MLX4_EN_RX_COAL_TARGET :
  873. coal->rx_max_coalesced_frames;
  874. priv->rx_usecs = (coal->rx_coalesce_usecs ==
  875. MLX4_EN_AUTO_CONF) ?
  876. MLX4_EN_RX_COAL_TIME :
  877. coal->rx_coalesce_usecs;
  878. /* Setting TX coalescing parameters */
  879. if (coal->tx_coalesce_usecs != priv->tx_usecs ||
  880. coal->tx_max_coalesced_frames != priv->tx_frames) {
  881. priv->tx_usecs = coal->tx_coalesce_usecs;
  882. priv->tx_frames = coal->tx_max_coalesced_frames;
  883. }
  884. /* Set adaptive coalescing params */
  885. priv->pkt_rate_low = coal->pkt_rate_low;
  886. priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
  887. priv->pkt_rate_high = coal->pkt_rate_high;
  888. priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
  889. priv->sample_interval = coal->rate_sample_interval;
  890. priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
  891. priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
  892. return mlx4_en_moderation_update(priv);
  893. }
/* ethtool -A: apply flow-control settings.
 * Pause autonegotiation is not supported, so pause->autoneg is rejected.
 * The new state is pushed to firmware via SET_PORT; on success the PFC
 * statistics bitmap is refreshed to match the new configuration.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (pause->autoneg)
		return -EINVAL;

	/* NOTE(review): the profile keeps the new values even if the
	 * SET_PORT command below fails
	 */
	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");
	else
		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
						priv->prof->rx_ppp,
						priv->prof->rx_pause,
						priv->prof->tx_ppp,
						priv->prof->tx_pause);

	return err;
}
  920. static void mlx4_en_get_pauseparam(struct net_device *dev,
  921. struct ethtool_pauseparam *pause)
  922. {
  923. struct mlx4_en_priv *priv = netdev_priv(dev);
  924. pause->tx_pause = priv->prof->tx_pause;
  925. pause->rx_pause = priv->prof->rx_pause;
  926. }
/* ethtool -G: resize the RX/TX rings.
 * Sizes are rounded up to a power of two and clamped to the HW limits.
 * Replacement resources are allocated first (into @tmp) so the live rings
 * are only torn down after allocation succeeds; the port is restarted if
 * it was up.  Jumbo/mini ring parameters are not supported.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	/* round/clamp the requested sizes to what the HW supports */
	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	/* nothing to do when the effective sizes are unchanged */
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[TX][0]->size)
		return 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.tx_ring_size = tx_size;
	new_prof.rx_ring_size = rx_size;

	/* allocate replacement rings before touching the live ones */
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* NOTE(review): a start_port error is overwritten here and only
	 * visible via the log message above
	 */
	err = mlx4_en_moderation_update(priv);
out:
	kfree(tmp);
	mutex_unlock(&mdev->state_lock);
	return err;
}
  975. static void mlx4_en_get_ringparam(struct net_device *dev,
  976. struct ethtool_ringparam *param)
  977. {
  978. struct mlx4_en_priv *priv = netdev_priv(dev);
  979. memset(param, 0, sizeof(*param));
  980. param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
  981. param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
  982. param->rx_pending = priv->port_up ?
  983. priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
  984. param->tx_pending = priv->tx_ring[TX][0]->size;
  985. }
/* RSS indirection table size: the largest power of two not exceeding
 * the number of RX rings.
 */
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return rounddown_pow_of_two(priv->rx_ring_num);
}
/* RSS hash key length in bytes (fixed for this device family) */
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
	return MLX4_EN_RSS_KEY_SIZE;
}
  995. static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
  996. {
  997. struct mlx4_en_priv *priv = netdev_priv(dev);
  998. /* check if requested function is supported by the device */
  999. if (hfunc == ETH_RSS_HASH_TOP) {
  1000. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
  1001. return -EINVAL;
  1002. if (!(dev->features & NETIF_F_RXHASH))
  1003. en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
  1004. return 0;
  1005. } else if (hfunc == ETH_RSS_HASH_XOR) {
  1006. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
  1007. return -EINVAL;
  1008. if (dev->features & NETIF_F_RXHASH)
  1009. en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
  1010. return 0;
  1011. }
  1012. return -EINVAL;
  1013. }
  1014. static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
  1015. u8 *hfunc)
  1016. {
  1017. struct mlx4_en_priv *priv = netdev_priv(dev);
  1018. u32 n = mlx4_en_get_rxfh_indir_size(dev);
  1019. u32 i, rss_rings;
  1020. int err = 0;
  1021. rss_rings = priv->prof->rss_rings ?: n;
  1022. rss_rings = rounddown_pow_of_two(rss_rings);
  1023. for (i = 0; i < n; i++) {
  1024. if (!ring_index)
  1025. break;
  1026. ring_index[i] = i % rss_rings;
  1027. }
  1028. if (key)
  1029. memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
  1030. if (hfunc)
  1031. *hfunc = priv->rss_hash_fn;
  1032. return err;
  1033. }
/* ethtool -X: configure the RSS indirection table, hash key and/or hash
 * function.  An explicit @ring_index table is only accepted when it maps
 * flows round-robin over a power-of-two prefix of the rings (i % rss_rings),
 * which is all the HW supports.  Applying the change restarts the port
 * when it is up.
 */
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 n = mlx4_en_get_rxfh_indir_size(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < n; i++) {
		if (!ring_index)
			break;
		/* the first wrap back to ring 0 reveals the table period */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: n)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = n;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	/* commit only the pieces the caller actually supplied */
	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hash_fn = hfunc;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
/* True when @field is all-zero or all-one bits.  Flow-spec field masks
 * must be "don't care" (0) or "exact match" (~0); partial masks are
 * rejected.  __force quiets sparse for the endian-annotated fields.
 */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)
/* Sanity-check an ethtool RX classification rule before translating it
 * into HW steering specs.  Only full (all-ones) or empty (all-zeros)
 * field masks are accepted, matching what the HW can express.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* TOS matching is not supported */
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* IPv4 only; at least one of src/dst must be matched and
		 * each mask must be empty or exact
		 */
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* VLAN etype matching is unsupported and the VID mask must
		 * be empty or exact; an exact VID must be a valid one
		 */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}

	return 0;
}
  1149. static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
  1150. struct list_head *rule_list_h,
  1151. struct mlx4_spec_list *spec_l2,
  1152. unsigned char *mac)
  1153. {
  1154. int err = 0;
  1155. __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
  1156. spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
  1157. memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
  1158. memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
  1159. if ((cmd->fs.flow_type & FLOW_EXT) &&
  1160. (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
  1161. spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
  1162. spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
  1163. }
  1164. list_add_tail(&spec_l2->list, rule_list_h);
  1165. return err;
  1166. }
/* Derive the destination MAC for an IPv4 rule and append the resulting
 * L2 spec to @rule_list_h.  Multicast IPv4 destinations map to their
 * multicast MAC; unicast uses the user-supplied MAC (FLOW_MAC_EXT) or
 * the netdev's own address.  Without CONFIG_INET the multicast mapping
 * helper is unavailable, so the rule is rejected.
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		/* unicast: match the user-supplied or our own MAC */
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		/* multicast: use the IP-derived multicast MAC */
		ip_eth_mc_map(ipv4_dst, mac);
	}

	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}
/* Build the spec list for an IP_USER_FLOW rule: an L2 (MAC) spec derived
 * from the IPv4 destination plus an L3 IPv4 src/dst spec.  Specs are
 * appended to @list_h, which the caller owns and frees; on error both
 * allocations are released and nothing is left on the list.
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	/* on success this puts spec_l2 on list_h (freed by the caller) */
	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	/* a non-zero (i.e. all-ones, per validation) mask enables matching */
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}
/* Build the spec list for a TCP_V4_FLOW/UDP_V4_FLOW rule: an L2 spec
 * derived from the IPv4 destination, an L3 IPv4 spec and an L4 TCP/UDP
 * port spec.  @proto selects TCP vs. UDP field sourcing.  Specs are
 * appended to @list_h, owned and freed by the caller; on error all
 * allocations are released.
 */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		/* L2 spec goes on the list here (caller frees via list) */
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	/* non-zero masks (validated as all-ones) enable exact matching */
	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}
/* Translate a validated ethtool flow spec into a list of mlx4_spec_list
 * entries on @rule_list_h.  The caller owns the list and frees the specs
 * appended to it, on both success and failure paths.
 */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		eth_spec = &cmd->fs.h_u.ether_spec;
		/* always succeeds; spec_l2 ends up on rule_list_h */
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		/* optionally match on EtherType */
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}
/* ETHTOOL_SRXCLSRLINS: install (or replace) the classification rule at
 * cmd->fs.location.  The target QP is resolved from the ring cookie
 * (drop QP, explicit QP number via EN_ETHTOOL_QP_ATTACH, or an RX ring
 * index), any existing rule at the location is detached first, then the
 * new rule is attached and recorded for later listing/removal.  The
 * temporary spec list built for the attach is always freed before return.
 */
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	/* rule priority encodes the ethtool slot within the ethtool domain */
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		/* explicit QP number; must belong to this port's RSS range */
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
		if (qpn < priv->rss_map.base_qpn ||
		    qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
			en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
			return -EINVAL;
		}
	} else {
		/* ring cookie is an RX ring index */
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	/* replace semantics: detach any rule already at this slot */
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	/* record the attached rule so it can be listed and detached later */
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}
  1390. static int mlx4_en_flow_detach(struct net_device *dev,
  1391. struct ethtool_rxnfc *cmd)
  1392. {
  1393. int err = 0;
  1394. struct ethtool_flow_id *rule;
  1395. struct mlx4_en_priv *priv = netdev_priv(dev);
  1396. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1397. return -EINVAL;
  1398. rule = &priv->ethtool_rules[cmd->fs.location];
  1399. if (!rule->id) {
  1400. err = -ENOENT;
  1401. goto out;
  1402. }
  1403. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1404. if (err) {
  1405. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1406. cmd->fs.location, rule->id);
  1407. goto out;
  1408. }
  1409. rule->id = 0;
  1410. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1411. list_del(&rule->list);
  1412. out:
  1413. return err;
  1414. }
  1415. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1416. int loc)
  1417. {
  1418. int err = 0;
  1419. struct ethtool_flow_id *rule;
  1420. struct mlx4_en_priv *priv = netdev_priv(dev);
  1421. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1422. return -EINVAL;
  1423. rule = &priv->ethtool_rules[loc];
  1424. if (rule->id)
  1425. memcpy(&cmd->fs, &rule->flow_spec,
  1426. sizeof(struct ethtool_rx_flow_spec));
  1427. else
  1428. err = -ENOENT;
  1429. return err;
  1430. }
  1431. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1432. {
  1433. int i, res = 0;
  1434. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1435. if (priv->ethtool_rules[i].id)
  1436. res++;
  1437. }
  1438. return res;
  1439. }
/* ethtool -n/-u: report RX ring count and classification rule state.
 * Classification queries require device-managed steering and a running
 * port.  GRXCLSRLALL walks all rule slots, recording the locations of
 * active ones into @rule_locs; -ENOENT from empty slots is expected and
 * cleared before returning.
 */
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    (mdev->dev->caps.steering_mode !=
	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		/* scan slots until rule_cnt locations found or slots
		 * exhausted (-EINVAL from an out-of-range index stops us)
		 */
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
  1478. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1479. {
  1480. int err = 0;
  1481. struct mlx4_en_priv *priv = netdev_priv(dev);
  1482. struct mlx4_en_dev *mdev = priv->mdev;
  1483. if (mdev->dev->caps.steering_mode !=
  1484. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1485. return -EINVAL;
  1486. switch (cmd->cmd) {
  1487. case ETHTOOL_SRXCLSRLINS:
  1488. err = mlx4_en_flow_replace(dev, cmd);
  1489. break;
  1490. case ETHTOOL_SRXCLSRLDEL:
  1491. err = mlx4_en_flow_detach(dev, cmd);
  1492. break;
  1493. default:
  1494. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1495. return -EINVAL;
  1496. }
  1497. return err;
  1498. }
  1499. static void mlx4_en_get_channels(struct net_device *dev,
  1500. struct ethtool_channels *channel)
  1501. {
  1502. struct mlx4_en_priv *priv = netdev_priv(dev);
  1503. channel->max_rx = MAX_RX_RINGS;
  1504. channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
  1505. channel->rx_count = priv->rx_ring_num;
  1506. channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
  1507. }
/* ethtool .set_channels: resize the RX/TX ring sets.
 *
 * The new ring resources are allocated into a scratch priv (@tmp)
 * first so the live configuration survives an allocation failure;
 * only then are they swapped in under mdev->state_lock, with the port
 * stopped and restarted around the swap if it was up.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int xdp_count;
	int err = 0;

	if (!channel->tx_count || !channel->rx_count)
		return -EINVAL;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	/* When XDP is active, one dedicated XDP TX ring is needed per
	 * RX ring; they count against the same MAX_TX_RINGS budget.
	 */
	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
	if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
		err = -EINVAL;
		en_err(priv,
		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
		       channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
		       MAX_TX_RINGS);
		goto out;
	}

	/* Build the requested profile and try to allocate its resources
	 * without touching the live ones yet.
	 */
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_tx_rings_p_up = channel->tx_count;
	new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
	new_prof.tx_ring_num[TX_XDP] = xdp_count;
	new_prof.rx_ring_num = channel->rx_count;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	if (netdev_get_num_tc(dev))
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* NOTE(review): this overwrites any mlx4_en_start_port() error
	 * above — the returned err reflects only the moderation update.
	 * Confirm whether that is intended.
	 */
	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}
  1563. static int mlx4_en_get_ts_info(struct net_device *dev,
  1564. struct ethtool_ts_info *info)
  1565. {
  1566. struct mlx4_en_priv *priv = netdev_priv(dev);
  1567. struct mlx4_en_dev *mdev = priv->mdev;
  1568. int ret;
  1569. ret = ethtool_op_get_ts_info(dev, info);
  1570. if (ret)
  1571. return ret;
  1572. if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
  1573. info->so_timestamping |=
  1574. SOF_TIMESTAMPING_TX_HARDWARE |
  1575. SOF_TIMESTAMPING_RX_HARDWARE |
  1576. SOF_TIMESTAMPING_RAW_HARDWARE;
  1577. info->tx_types =
  1578. (1 << HWTSTAMP_TX_OFF) |
  1579. (1 << HWTSTAMP_TX_ON);
  1580. info->rx_filters =
  1581. (1 << HWTSTAMP_FILTER_NONE) |
  1582. (1 << HWTSTAMP_FILTER_ALL);
  1583. if (mdev->ptp_clock)
  1584. info->phc_index = ptp_clock_index(mdev->ptp_clock);
  1585. }
  1586. return ret;
  1587. }
/* ethtool .set_priv_flags: toggle driver-private flags.
 *
 * Handles MLX4_EN_PRIV_FLAGS_BLUEFLAME (per-TX-ring BlueFlame doorbell
 * usage) and MLX4_EN_PRIV_FLAGS_PHV (set via set_phv_bit() on the FW).
 * Returns 0 on success or a negative errno.
 */
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
	int i;
	int ret = 0;

	if (bf_enabled_new != bf_enabled_old) {
		int t;

		if (bf_enabled_new) {
			bool bf_supported = true;

			/* BlueFlame can only be enabled if every TX ring
			 * (of every TX type) got a BlueFlame register at
			 * ring-allocation time.
			 */
			for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
				for (i = 0; i < priv->tx_ring_num[t]; i++)
					bf_supported &=
						priv->tx_ring[t][i]->bf_alloced;

			if (!bf_supported) {
				en_err(priv, "BlueFlame is not supported\n");
				return -EINVAL;
			}

			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
		} else {
			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
		}

		/* Propagate the new setting to every TX ring. */
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
			for (i = 0; i < priv->tx_ring_num[t]; i++)
				priv->tx_ring[t][i]->bf_enabled =
					bf_enabled_new;

		en_info(priv, "BlueFlame %s\n",
			bf_enabled_new ? "Enabled" : "Disabled");
	}

	if (phv_enabled_new != phv_enabled_old) {
		/* Update the FW first; only mirror the flag in pflags
		 * once the device accepted the change.
		 */
		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
		if (ret)
			return ret;
		else if (phv_enabled_new)
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		else
			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
		en_info(priv, "PHV bit %s\n",
			phv_enabled_new ? "Enabled" : "Disabled");
	}
	return 0;
}
  1634. static u32 mlx4_en_get_priv_flags(struct net_device *dev)
  1635. {
  1636. struct mlx4_en_priv *priv = netdev_priv(dev);
  1637. return priv->pflags;
  1638. }
  1639. static int mlx4_en_get_tunable(struct net_device *dev,
  1640. const struct ethtool_tunable *tuna,
  1641. void *data)
  1642. {
  1643. const struct mlx4_en_priv *priv = netdev_priv(dev);
  1644. int ret = 0;
  1645. switch (tuna->id) {
  1646. case ETHTOOL_TX_COPYBREAK:
  1647. *(u32 *)data = priv->prof->inline_thold;
  1648. break;
  1649. default:
  1650. ret = -EINVAL;
  1651. break;
  1652. }
  1653. return ret;
  1654. }
  1655. static int mlx4_en_set_tunable(struct net_device *dev,
  1656. const struct ethtool_tunable *tuna,
  1657. const void *data)
  1658. {
  1659. struct mlx4_en_priv *priv = netdev_priv(dev);
  1660. int val, ret = 0;
  1661. switch (tuna->id) {
  1662. case ETHTOOL_TX_COPYBREAK:
  1663. val = *(u32 *)data;
  1664. if (val < MIN_PKT_LEN || val > MAX_INLINE)
  1665. ret = -EINVAL;
  1666. else
  1667. priv->prof->inline_thold = val;
  1668. break;
  1669. default:
  1670. ret = -EINVAL;
  1671. break;
  1672. }
  1673. return ret;
  1674. }
  1675. static int mlx4_en_get_module_info(struct net_device *dev,
  1676. struct ethtool_modinfo *modinfo)
  1677. {
  1678. struct mlx4_en_priv *priv = netdev_priv(dev);
  1679. struct mlx4_en_dev *mdev = priv->mdev;
  1680. int ret;
  1681. u8 data[4];
  1682. /* Read first 2 bytes to get Module & REV ID */
  1683. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1684. 0/*offset*/, 2/*size*/, data);
  1685. if (ret < 2)
  1686. return -EIO;
  1687. switch (data[0] /* identifier */) {
  1688. case MLX4_MODULE_ID_QSFP:
  1689. modinfo->type = ETH_MODULE_SFF_8436;
  1690. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1691. break;
  1692. case MLX4_MODULE_ID_QSFP_PLUS:
  1693. if (data[1] >= 0x3) { /* revision id */
  1694. modinfo->type = ETH_MODULE_SFF_8636;
  1695. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1696. } else {
  1697. modinfo->type = ETH_MODULE_SFF_8436;
  1698. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1699. }
  1700. break;
  1701. case MLX4_MODULE_ID_QSFP28:
  1702. modinfo->type = ETH_MODULE_SFF_8636;
  1703. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1704. break;
  1705. case MLX4_MODULE_ID_SFP:
  1706. modinfo->type = ETH_MODULE_SFF_8472;
  1707. modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
  1708. break;
  1709. default:
  1710. return -EINVAL;
  1711. }
  1712. return 0;
  1713. }
  1714. static int mlx4_en_get_module_eeprom(struct net_device *dev,
  1715. struct ethtool_eeprom *ee,
  1716. u8 *data)
  1717. {
  1718. struct mlx4_en_priv *priv = netdev_priv(dev);
  1719. struct mlx4_en_dev *mdev = priv->mdev;
  1720. int offset = ee->offset;
  1721. int i = 0, ret;
  1722. if (ee->len == 0)
  1723. return -EINVAL;
  1724. memset(data, 0, ee->len);
  1725. while (i < ee->len) {
  1726. en_dbg(DRV, priv,
  1727. "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
  1728. i, offset, ee->len - i);
  1729. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1730. offset, ee->len - i, data + i);
  1731. if (!ret) /* Done reading */
  1732. return 0;
  1733. if (ret < 0) {
  1734. en_err(priv,
  1735. "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
  1736. i, offset, ee->len - i, ret);
  1737. return 0;
  1738. }
  1739. i += ret;
  1740. offset += ret;
  1741. }
  1742. return 0;
  1743. }
  1744. static int mlx4_en_set_phys_id(struct net_device *dev,
  1745. enum ethtool_phys_id_state state)
  1746. {
  1747. int err;
  1748. u16 beacon_duration;
  1749. struct mlx4_en_priv *priv = netdev_priv(dev);
  1750. struct mlx4_en_dev *mdev = priv->mdev;
  1751. if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
  1752. return -EOPNOTSUPP;
  1753. switch (state) {
  1754. case ETHTOOL_ID_ACTIVE:
  1755. beacon_duration = PORT_BEACON_MAX_LIMIT;
  1756. break;
  1757. case ETHTOOL_ID_INACTIVE:
  1758. beacon_duration = 0;
  1759. break;
  1760. default:
  1761. return -EOPNOTSUPP;
  1762. }
  1763. err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
  1764. return err;
  1765. }
/* ethtool operations table for mlx4 EN net devices. */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_link_ksettings = mlx4_en_get_link_ksettings,
	.set_link_ksettings = mlx4_en_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};