en_ethtool.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/ethtool.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/mlx4/driver.h>
  37. #include <linux/mlx4/device.h>
  38. #include <linux/in.h>
  39. #include <net/ip.h>
  40. #include <linux/bitmap.h>
  41. #include "mlx4_en.h"
  42. #include "en_port.h"
/* Flag bit (bit 63) used in the 64-bit ethtool flow-steering cookie to mark
 * a QP-attach request.
 */
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
/* All-ones big-endian masks for exact-match fields in flow-steering rules. */
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
  46. static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
  47. {
  48. int i;
  49. int err = 0;
  50. for (i = 0; i < priv->tx_ring_num; i++) {
  51. priv->tx_cq[i]->moder_cnt = priv->tx_frames;
  52. priv->tx_cq[i]->moder_time = priv->tx_usecs;
  53. if (priv->port_up) {
  54. err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
  55. if (err)
  56. return err;
  57. }
  58. }
  59. if (priv->adaptive_rx_coal)
  60. return 0;
  61. for (i = 0; i < priv->rx_ring_num; i++) {
  62. priv->rx_cq[i]->moder_cnt = priv->rx_frames;
  63. priv->rx_cq[i]->moder_time = priv->rx_usecs;
  64. priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
  65. if (priv->port_up) {
  66. err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
  67. if (err)
  68. return err;
  69. }
  70. }
  71. return err;
  72. }
  73. static void
  74. mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
  75. {
  76. struct mlx4_en_priv *priv = netdev_priv(dev);
  77. struct mlx4_en_dev *mdev = priv->mdev;
  78. strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
  79. strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
  80. sizeof(drvinfo->version));
  81. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  82. "%d.%d.%d",
  83. (u16) (mdev->dev->caps.fw_ver >> 32),
  84. (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
  85. (u16) (mdev->dev->caps.fw_ver & 0xffff));
  86. strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
  87. sizeof(drvinfo->bus_info));
  88. }
/* Names reported for ETH_SS_PRIV_FLAGS; index order is the flag bit order. */
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
	"phv-bit"
};
/*
 * Counter names for ethtool -S.  Grouping and order MUST match the order in
 * which mlx4_en_get_ethtool_stats() copies counters out: main (net_device)
 * stats, port stats, PF stats, per-priority and global RX/TX flow-control
 * stats, then packet stats.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	/* main statistics */
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",

	/* pf statistics */
	"pf_rx_packets",
	"pf_rx_bytes",
	"pf_tx_packets",
	"pf_tx_bytes",

	/* priority flow control statistics rx */
	"rx_pause_prio_0", "rx_pause_duration_prio_0",
	"rx_pause_transition_prio_0",
	"rx_pause_prio_1", "rx_pause_duration_prio_1",
	"rx_pause_transition_prio_1",
	"rx_pause_prio_2", "rx_pause_duration_prio_2",
	"rx_pause_transition_prio_2",
	"rx_pause_prio_3", "rx_pause_duration_prio_3",
	"rx_pause_transition_prio_3",
	"rx_pause_prio_4", "rx_pause_duration_prio_4",
	"rx_pause_transition_prio_4",
	"rx_pause_prio_5", "rx_pause_duration_prio_5",
	"rx_pause_transition_prio_5",
	"rx_pause_prio_6", "rx_pause_duration_prio_6",
	"rx_pause_transition_prio_6",
	"rx_pause_prio_7", "rx_pause_duration_prio_7",
	"rx_pause_transition_prio_7",

	/* flow control statistics rx */
	"rx_pause", "rx_pause_duration", "rx_pause_transition",

	/* priority flow control statistics tx */
	"tx_pause_prio_0", "tx_pause_duration_prio_0",
	"tx_pause_transition_prio_0",
	"tx_pause_prio_1", "tx_pause_duration_prio_1",
	"tx_pause_transition_prio_1",
	"tx_pause_prio_2", "tx_pause_duration_prio_2",
	"tx_pause_transition_prio_2",
	"tx_pause_prio_3", "tx_pause_duration_prio_3",
	"tx_pause_transition_prio_3",
	"tx_pause_prio_4", "tx_pause_duration_prio_4",
	"tx_pause_transition_prio_4",
	"tx_pause_prio_5", "tx_pause_duration_prio_5",
	"tx_pause_transition_prio_5",
	"tx_pause_prio_6", "tx_pause_duration_prio_6",
	"tx_pause_transition_prio_6",
	"tx_pause_prio_7", "tx_pause_duration_prio_7",
	"tx_pause_transition_prio_7",

	/* flow control statistics tx */
	"tx_pause", "tx_pause_duration", "tx_pause_transition",

	/* packet statistics */
	"rx_multicast_packets",
	"rx_broadcast_packets",
	"rx_jabbers",
	"rx_in_range_length_error",
	"rx_out_range_length_error",
	"tx_multicast_packets",
	"tx_broadcast_packets",
	"rx_prio_0_packets", "rx_prio_0_bytes",
	"rx_prio_1_packets", "rx_prio_1_bytes",
	"rx_prio_2_packets", "rx_prio_2_bytes",
	"rx_prio_3_packets", "rx_prio_3_bytes",
	"rx_prio_4_packets", "rx_prio_4_bytes",
	"rx_prio_5_packets", "rx_prio_5_bytes",
	"rx_prio_6_packets", "rx_prio_6_bytes",
	"rx_prio_7_packets", "rx_prio_7_bytes",
	"rx_novlan_packets", "rx_novlan_bytes",
	"tx_prio_0_packets", "tx_prio_0_bytes",
	"tx_prio_1_packets", "tx_prio_1_bytes",
	"tx_prio_2_packets", "tx_prio_2_bytes",
	"tx_prio_3_packets", "tx_prio_3_bytes",
	"tx_prio_4_packets", "tx_prio_4_bytes",
	"tx_prio_5_packets", "tx_prio_5_bytes",
	"tx_prio_6_packets", "tx_prio_6_bytes",
	"tx_prio_7_packets", "tx_prio_7_bytes",
	"tx_novlan_packets", "tx_novlan_bytes",
};
/*
 * Self-test names for ETH_SS_TEST.  The last two entries are only reported
 * when the device has MLX4_DEV_CAP_FLAG_UC_LOOPBACK (see
 * mlx4_en_get_sset_count() and mlx4_en_get_strings()).
 */
static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};
  183. static u32 mlx4_en_get_msglevel(struct net_device *dev)
  184. {
  185. return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
  186. }
  187. static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
  188. {
  189. ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
  190. }
  191. static void mlx4_en_get_wol(struct net_device *netdev,
  192. struct ethtool_wolinfo *wol)
  193. {
  194. struct mlx4_en_priv *priv = netdev_priv(netdev);
  195. int err = 0;
  196. u64 config = 0;
  197. u64 mask;
  198. if ((priv->port < 1) || (priv->port > 2)) {
  199. en_err(priv, "Failed to get WoL information\n");
  200. return;
  201. }
  202. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  203. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  204. if (!(priv->mdev->dev->caps.flags & mask)) {
  205. wol->supported = 0;
  206. wol->wolopts = 0;
  207. return;
  208. }
  209. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  210. if (err) {
  211. en_err(priv, "Failed to get WoL information\n");
  212. return;
  213. }
  214. if (config & MLX4_EN_WOL_MAGIC)
  215. wol->supported = WAKE_MAGIC;
  216. else
  217. wol->supported = 0;
  218. if (config & MLX4_EN_WOL_ENABLED)
  219. wol->wolopts = WAKE_MAGIC;
  220. else
  221. wol->wolopts = 0;
  222. }
  223. static int mlx4_en_set_wol(struct net_device *netdev,
  224. struct ethtool_wolinfo *wol)
  225. {
  226. struct mlx4_en_priv *priv = netdev_priv(netdev);
  227. u64 config = 0;
  228. int err = 0;
  229. u64 mask;
  230. if ((priv->port < 1) || (priv->port > 2))
  231. return -EOPNOTSUPP;
  232. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  233. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  234. if (!(priv->mdev->dev->caps.flags & mask))
  235. return -EOPNOTSUPP;
  236. if (wol->supported & ~WAKE_MAGIC)
  237. return -EINVAL;
  238. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  239. if (err) {
  240. en_err(priv, "Failed to get WoL info, unable to modify\n");
  241. return err;
  242. }
  243. if (wol->wolopts & WAKE_MAGIC) {
  244. config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
  245. MLX4_EN_WOL_MAGIC;
  246. } else {
  247. config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
  248. config |= MLX4_EN_WOL_DO_MODIFY;
  249. }
  250. err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
  251. if (err)
  252. en_err(priv, "Failed to set WoL information\n");
  253. return err;
  254. }
/*
 * Helper for walking the statistics bitmap: iterates over counter slots,
 * testing each against stats_bitmap (or accepting all slots when the
 * bitmap is empty).
 */
struct bitmap_iterator {
	unsigned long *stats_bitmap;	/* which counters are enabled */
	unsigned int count;		/* number of counters that will be reported */
	unsigned int iterator;		/* current bit position */
	bool advance_array; /* if set, force no increments */
};
  261. static inline void bitmap_iterator_init(struct bitmap_iterator *h,
  262. unsigned long *stats_bitmap,
  263. int count)
  264. {
  265. h->iterator = 0;
  266. h->advance_array = !bitmap_empty(stats_bitmap, count);
  267. h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
  268. : count;
  269. h->stats_bitmap = stats_bitmap;
  270. }
  271. static inline int bitmap_iterator_test(struct bitmap_iterator *h)
  272. {
  273. return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
  274. }
  275. static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
  276. {
  277. return h->iterator++;
  278. }
/* Number of counters the iterator will report (see bitmap_iterator_init). */
static inline unsigned int
bitmap_iterator_count(struct bitmap_iterator *h)
{
	return h->count;
}
  284. static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
  285. {
  286. struct mlx4_en_priv *priv = netdev_priv(dev);
  287. struct bitmap_iterator it;
  288. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  289. switch (sset) {
  290. case ETH_SS_STATS:
  291. return bitmap_iterator_count(&it) +
  292. (priv->tx_ring_num * 2) +
  293. (priv->rx_ring_num * 2);
  294. case ETH_SS_TEST:
  295. return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
  296. & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
  297. case ETH_SS_PRIV_FLAGS:
  298. return ARRAY_SIZE(mlx4_en_priv_flags);
  299. default:
  300. return -EOPNOTSUPP;
  301. }
  302. }
/*
 * ethtool -S: copy all enabled counters into @data.  The copy-out order
 * MUST match the name order produced by mlx4_en_get_strings(): main stats,
 * port stats, PF stats, RX priority/global flow-control stats, TX
 * priority/global flow-control stats, packet stats, then per-ring
 * packets/bytes.  priv->stats_bitmap filters which of the non-ring
 * counters are reported.
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	/* hold the stats lock so the snapshot is internally consistent */
	spin_lock_bh(&priv->stats_lock);

	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->stats)[i];

	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->port_stats)[i];

	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((unsigned long *)&priv->pf_stats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->rx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->rx_flowstats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->tx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->tx_flowstats)[i];

	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->pkstats)[i];

	/* per-ring counters are always reported, not bitmap-filtered */
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
	}
	spin_unlock_bh(&priv->stats_lock);
}
/* ethtool self-test entry point; the actual tests run in mlx4_en_ex_selftest(). */
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
/*
 * ethtool string tables: self-test names, statistics names, and private
 * flag names.  For ETH_SS_STATS the same bitmap iterator as
 * mlx4_en_get_ethtool_stats() is used, so names and values line up.
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, strings = 0;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (stringset) {
	case ETH_SS_TEST:
		/* the last two tests are only exposed with UC loopback support */
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PORT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PF_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PKT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		/* per-ring names are generated, not taken from main_strings */
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;
	}
}
  419. static u32 mlx4_en_autoneg_get(struct net_device *dev)
  420. {
  421. struct mlx4_en_priv *priv = netdev_priv(dev);
  422. struct mlx4_en_dev *mdev = priv->mdev;
  423. u32 autoneg = AUTONEG_DISABLE;
  424. if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
  425. (priv->port_state.flags & MLX4_EN_PORT_ANE))
  426. autoneg = AUTONEG_ENABLE;
  427. return autoneg;
  428. }
/*
 * Set the supported-port link-mode bit (TP, FIBRE or Backplane) from the
 * PTYS protocol capability mask.  The else-if chain gives twisted-pair
 * priority over fibre, and fibre over backplane, when a device reports
 * modes from more than one group; at most one bit is set.
 */
static void ptys2ethtool_update_supported_port(unsigned long *mask,
					       struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		__set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
	}
}
/*
 * Map the operational (or, when the link is down, the capability) PTYS
 * protocol mask to an ethtool PORT_* type.  Checked in priority order:
 * twisted pair, fibre, direct-attach copper, backplane (PORT_NONE),
 * otherwise PORT_OTHER.
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
		return PORT_DA;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return PORT_NONE;
	}
	return PORT_OTHER;
}
/* Number of mlx4 link modes == bits in the PTYS eth_proto_cap field. */
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Selects which mask of a ptys2ethtool_config entry an operation targets. */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
};
/*
 * Translation entry for one mlx4 link mode: the equivalent ethtool
 * supported/advertised link-mode masks plus the SPEED_* value
 * (filled in by MLX4_BUILD_PTYS2ETHTOOL_CONFIG).
 */
struct ptys2ethtool_config {
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
	u32 speed;
};
  495. static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
  496. enum ethtool_report report)
  497. {
  498. switch (report) {
  499. case SUPPORTED:
  500. return cfg->supported;
  501. case ADVERTISED:
  502. return cfg->advertised;
  503. }
  504. return NULL;
  505. }
/*
 * Fill one ptys2ethtool_map[] entry: record the speed and set each listed
 * ETHTOOL_LINK_MODE_* bit in both the supported and advertised masks.
 * Implemented as a statement-expression macro so it can accept a variable
 * number of link-mode bits.
 */
#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)		\
	({								\
		struct ptys2ethtool_config *cfg;			\
		const unsigned int modes[] = { __VA_ARGS__ };		\
		unsigned int i;						\
		cfg = &ptys2ethtool_map[reg_];				\
		cfg->speed = speed_;					\
		bitmap_zero(cfg->supported,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		bitmap_zero(cfg->advertised,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {		\
			__set_bit(modes[i], cfg->supported);		\
			__set_bit(modes[i], cfg->advertised);		\
		}							\
	})
/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
  524. void __init mlx4_en_init_ptys2ethtool_map(void)
  525. {
  526. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
  527. ETHTOOL_LINK_MODE_100baseT_Full_BIT);
  528. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
  529. ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
  530. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
  531. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  532. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
  533. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  534. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
  535. ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
  536. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
  537. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  538. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
  539. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  540. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
  541. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  542. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
  543. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  544. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
  545. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  546. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
  547. ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
  548. ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
  549. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
  550. ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
  551. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
  552. ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
  553. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
  554. ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
  555. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
  556. ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
  557. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
  558. ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
  559. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
  560. ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
  561. };
  562. static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
  563. u32 eth_proto,
  564. enum ethtool_report report)
  565. {
  566. int i;
  567. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  568. if (eth_proto & MLX4_PROT_MASK(i))
  569. bitmap_or(link_modes, link_modes,
  570. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  571. report),
  572. __ETHTOOL_LINK_MODE_MASK_NBITS);
  573. }
  574. }
  575. static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
  576. enum ethtool_report report)
  577. {
  578. int i;
  579. u32 ptys_modes = 0;
  580. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  581. if (bitmap_intersects(
  582. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  583. report),
  584. link_modes,
  585. __ETHTOOL_LINK_MODE_MASK_NBITS))
  586. ptys_modes |= 1 << i;
  587. }
  588. return ptys_modes;
  589. }
  590. /* Convert actual speed (SPEED_XXX) to ptys link modes */
  591. static u32 speed2ptys_link_modes(u32 speed)
  592. {
  593. int i;
  594. u32 ptys_modes = 0;
  595. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  596. if (ptys2ethtool_map[i].speed == speed)
  597. ptys_modes |= 1 << i;
  598. }
  599. return ptys_modes;
  600. }
/*
 * Fill @link_ksettings from the firmware PTYS register (query access):
 * supported/advertised/lp_advertising link modes, port type, pause
 * advertisement and autoneg state.  Returns 0 on success or the
 * mlx4_ACCESS_PTYS_REG error.
 */
static int
ethtool_get_ptys_link_ksettings(struct net_device *dev,
				struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	/* reset supported/advertising masks */
	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
					   &ptys_reg);

	/* supported modes come from eth_proto_cap, advertised from admin */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
				       eth_proto, SUPPORTED);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
				       eth_proto, ADVERTISED);

	/* pause advertisement reflects the configured TX/RX pause profile */
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     Pause);
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     Asym_Pause);

	if (priv->prof->tx_pause)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Pause);
	if (priv->prof->tx_pause ^ priv->prof->rx_pause)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Asym_Pause);

	link_ksettings->base.port = ptys_get_active_port(&ptys_reg);

	if (mlx4_en_autoneg_get(dev)) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg
		= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* link-partner advertisement from eth_proto_lp_adv */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
	ptys2ethtool_update_link_modes(
		link_ksettings->link_modes.lp_advertising,
		eth_proto, ADVERTISED);
	if (priv->port_state.flags & MLX4_EN_PORT_ANC)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     lp_advertising, Autoneg);

	link_ksettings->base.phy_address = 0;
	link_ksettings->base.mdio_support = 0;
	link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret;
}
  674. static void
  675. ethtool_get_default_link_ksettings(
  676. struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
  677. {
  678. struct mlx4_en_priv *priv = netdev_priv(dev);
  679. int trans_type;
  680. link_ksettings->base.autoneg = AUTONEG_DISABLE;
  681. ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
  682. ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
  683. 10000baseT_Full);
  684. ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
  685. ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
  686. 10000baseT_Full);
  687. trans_type = priv->port_state.transceiver;
  688. if (trans_type > 0 && trans_type <= 0xC) {
  689. link_ksettings->base.port = PORT_FIBRE;
  690. ethtool_link_ksettings_add_link_mode(link_ksettings,
  691. supported, FIBRE);
  692. ethtool_link_ksettings_add_link_mode(link_ksettings,
  693. advertising, FIBRE);
  694. } else if (trans_type == 0x80 || trans_type == 0) {
  695. link_ksettings->base.port = PORT_TP;
  696. ethtool_link_ksettings_add_link_mode(link_ksettings,
  697. supported, TP);
  698. ethtool_link_ksettings_add_link_mode(link_ksettings,
  699. advertising, TP);
  700. } else {
  701. link_ksettings->base.port = -1;
  702. }
  703. }
  704. static int
  705. mlx4_en_get_link_ksettings(struct net_device *dev,
  706. struct ethtool_link_ksettings *link_ksettings)
  707. {
  708. struct mlx4_en_priv *priv = netdev_priv(dev);
  709. int ret = -EINVAL;
  710. if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
  711. return -ENOMEM;
  712. en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
  713. priv->port_state.flags & MLX4_EN_PORT_ANC,
  714. priv->port_state.flags & MLX4_EN_PORT_ANE);
  715. if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
  716. ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
  717. if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
  718. ethtool_get_default_link_ksettings(dev, link_ksettings);
  719. if (netif_carrier_ok(dev)) {
  720. link_ksettings->base.speed = priv->port_state.link_speed;
  721. link_ksettings->base.duplex = DUPLEX_FULL;
  722. } else {
  723. link_ksettings->base.speed = SPEED_UNKNOWN;
  724. link_ksettings->base.duplex = DUPLEX_UNKNOWN;
  725. }
  726. return 0;
  727. }
  728. /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
  729. static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
  730. __be32 proto_cap)
  731. {
  732. __be32 proto_admin = 0;
  733. if (!speed) { /* Speed = 0 ==> Reset Link modes */
  734. proto_admin = proto_cap;
  735. en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
  736. be32_to_cpu(proto_cap));
  737. } else {
  738. u32 ptys_link_modes = speed2ptys_link_modes(speed);
  739. proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
  740. en_info(priv, "Setting Speed to %d\n", speed);
  741. }
  742. return proto_admin;
  743. }
/* ethtool .set_link_ksettings: program new link modes through the
 * PTYS register.  With autoneg enabled the requested advertising mask
 * is used directly; otherwise the admin mask is derived from the
 * requested speed.  When the admin mask actually changes, a running
 * port is restarted so the change takes effect.
 */
static int
mlx4_en_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	__be32 proto_admin;
	int ret;

	u32 ptys_adv = ethtool2ptys_link_modes(
		link_ksettings->link_modes.advertising, ADVERTISED);
	const int speed = link_ksettings->base.speed;

	en_dbg(DRV, priv,
	       "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
	       speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
	       link_ksettings->link_modes.advertising,
	       link_ksettings->base.autoneg,
	       link_ksettings->base.duplex);

	/* half duplex and devices lacking ETH protocol control are
	 * not supported
	 */
	if (!(priv->mdev->dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
	    (link_ksettings->base.duplex == DUPLEX_HALF))
		return -EINVAL;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
			ret);
		/* NOTE(review): a failed query is deliberately reported
		 * as success — the settings simply remain unchanged
		 */
		return 0;
	}

	proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
		cpu_to_be32(ptys_adv) :
		speed_set_ptys_admin(priv, speed,
				     ptys_reg.eth_proto_cap);

	/* clamp the request to what the device can actually do */
	proto_admin &= ptys_reg.eth_proto_cap;
	if (!proto_admin) {
		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
		return -EINVAL; /* nothing to change due to bad input */
	}

	if (proto_admin == ptys_reg.eth_proto_admin)
		return 0; /* Nothing to change */

	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
	       be32_to_cpu(proto_admin));

	ptys_reg.eth_proto_admin = proto_admin;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
				   &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
		return ret;
	}

	/* restart a running port so the new modes are (re)negotiated */
	mutex_lock(&priv->mdev->state_lock);
	if (priv->port_up) {
		en_warn(priv, "Port link mode changed, restarting port...\n");
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&priv->mdev->state_lock);
	return 0;
}
  806. static int mlx4_en_get_coalesce(struct net_device *dev,
  807. struct ethtool_coalesce *coal)
  808. {
  809. struct mlx4_en_priv *priv = netdev_priv(dev);
  810. coal->tx_coalesce_usecs = priv->tx_usecs;
  811. coal->tx_max_coalesced_frames = priv->tx_frames;
  812. coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
  813. coal->rx_coalesce_usecs = priv->rx_usecs;
  814. coal->rx_max_coalesced_frames = priv->rx_frames;
  815. coal->pkt_rate_low = priv->pkt_rate_low;
  816. coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
  817. coal->pkt_rate_high = priv->pkt_rate_high;
  818. coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
  819. coal->rate_sample_interval = priv->sample_interval;
  820. coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
  821. return 0;
  822. }
  823. static int mlx4_en_set_coalesce(struct net_device *dev,
  824. struct ethtool_coalesce *coal)
  825. {
  826. struct mlx4_en_priv *priv = netdev_priv(dev);
  827. if (!coal->tx_max_coalesced_frames_irq)
  828. return -EINVAL;
  829. priv->rx_frames = (coal->rx_max_coalesced_frames ==
  830. MLX4_EN_AUTO_CONF) ?
  831. MLX4_EN_RX_COAL_TARGET :
  832. coal->rx_max_coalesced_frames;
  833. priv->rx_usecs = (coal->rx_coalesce_usecs ==
  834. MLX4_EN_AUTO_CONF) ?
  835. MLX4_EN_RX_COAL_TIME :
  836. coal->rx_coalesce_usecs;
  837. /* Setting TX coalescing parameters */
  838. if (coal->tx_coalesce_usecs != priv->tx_usecs ||
  839. coal->tx_max_coalesced_frames != priv->tx_frames) {
  840. priv->tx_usecs = coal->tx_coalesce_usecs;
  841. priv->tx_frames = coal->tx_max_coalesced_frames;
  842. }
  843. /* Set adaptive coalescing params */
  844. priv->pkt_rate_low = coal->pkt_rate_low;
  845. priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
  846. priv->pkt_rate_high = coal->pkt_rate_high;
  847. priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
  848. priv->sample_interval = coal->rate_sample_interval;
  849. priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
  850. priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
  851. return mlx4_en_moderation_update(priv);
  852. }
  853. static int mlx4_en_set_pauseparam(struct net_device *dev,
  854. struct ethtool_pauseparam *pause)
  855. {
  856. struct mlx4_en_priv *priv = netdev_priv(dev);
  857. struct mlx4_en_dev *mdev = priv->mdev;
  858. int err;
  859. if (pause->autoneg)
  860. return -EINVAL;
  861. priv->prof->tx_pause = pause->tx_pause != 0;
  862. priv->prof->rx_pause = pause->rx_pause != 0;
  863. err = mlx4_SET_PORT_general(mdev->dev, priv->port,
  864. priv->rx_skb_size + ETH_FCS_LEN,
  865. priv->prof->tx_pause,
  866. priv->prof->tx_ppp,
  867. priv->prof->rx_pause,
  868. priv->prof->rx_ppp);
  869. if (err)
  870. en_err(priv, "Failed setting pause params\n");
  871. else
  872. mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
  873. priv->prof->rx_ppp,
  874. priv->prof->rx_pause,
  875. priv->prof->tx_ppp,
  876. priv->prof->tx_pause);
  877. return err;
  878. }
  879. static void mlx4_en_get_pauseparam(struct net_device *dev,
  880. struct ethtool_pauseparam *pause)
  881. {
  882. struct mlx4_en_priv *priv = netdev_priv(dev);
  883. pause->tx_pause = priv->prof->tx_pause;
  884. pause->rx_pause = priv->prof->rx_pause;
  885. }
/* ethtool .set_ringparam: resize the RX/TX rings.  Requested sizes are
 * rounded up to a power of two and clamped to the driver limits.
 * Resizing tears down and rebuilds all port resources, briefly
 * stopping a running port.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	/* jumbo and mini RX rings are not supported */
	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	/* nothing to do when the effective sizes are unchanged */
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* re-apply interrupt moderation to the new rings */
	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
  929. static void mlx4_en_get_ringparam(struct net_device *dev,
  930. struct ethtool_ringparam *param)
  931. {
  932. struct mlx4_en_priv *priv = netdev_priv(dev);
  933. memset(param, 0, sizeof(*param));
  934. param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
  935. param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
  936. param->rx_pending = priv->port_up ?
  937. priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
  938. param->tx_pending = priv->tx_ring[0]->size;
  939. }
  940. static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
  941. {
  942. struct mlx4_en_priv *priv = netdev_priv(dev);
  943. return priv->rx_ring_num;
  944. }
  945. static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
  946. {
  947. return MLX4_EN_RSS_KEY_SIZE;
  948. }
  949. static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
  950. {
  951. struct mlx4_en_priv *priv = netdev_priv(dev);
  952. /* check if requested function is supported by the device */
  953. if (hfunc == ETH_RSS_HASH_TOP) {
  954. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
  955. return -EINVAL;
  956. if (!(dev->features & NETIF_F_RXHASH))
  957. en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
  958. return 0;
  959. } else if (hfunc == ETH_RSS_HASH_XOR) {
  960. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
  961. return -EINVAL;
  962. if (dev->features & NETIF_F_RXHASH)
  963. en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
  964. return 0;
  965. }
  966. return -EINVAL;
  967. }
  968. static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
  969. u8 *hfunc)
  970. {
  971. struct mlx4_en_priv *priv = netdev_priv(dev);
  972. struct mlx4_en_rss_map *rss_map = &priv->rss_map;
  973. int rss_rings;
  974. size_t n = priv->rx_ring_num;
  975. int err = 0;
  976. rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
  977. rss_rings = 1 << ilog2(rss_rings);
  978. while (n--) {
  979. if (!ring_index)
  980. break;
  981. ring_index[n] = rss_map->qps[n % rss_rings].qpn -
  982. rss_map->base_qpn;
  983. }
  984. if (key)
  985. memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
  986. if (hfunc)
  987. *hfunc = priv->rss_hash_fn;
  988. return err;
  989. }
/* ethtool .set_rxfh: configure the RSS indirection table, hash key
 * and/or hash function.  The indirection table must be a repeating
 * 0..rss_rings-1 pattern with rss_rings a power of two; applying the
 * change restarts a running port.
 */
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (!ring_index)
			continue;
		/* the first wrap back to ring 0 fixes the table period */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	/* apply only the pieces the caller actually supplied */
	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hash_fn = hfunc;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
/* True when every bit of @field is clear or every bit is set — the
 * only mask patterns (wildcard / exact match) that flow steering
 * validation accepts below.
 */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)
/* Validate an ethtool flow-steering rule before it is translated into
 * a device rule.  Only exact-match (all-ones) or wildcard (all-zeros)
 * masks are accepted.  Returns 0 when the rule is acceptable, -EINVAL
 * otherwise.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* matching on TOS is not supported */
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* only IPv4 src/dst matching is supported, and at least
		 * one of them must be requested
		 */
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* VLAN etype matching is unsupported; when a VLAN-id
		 * mask is given it must cover all VID bits exactly
		 */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			/* the matched VLAN id itself must be a legal VID */
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}

	return 0;
}
  1104. static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
  1105. struct list_head *rule_list_h,
  1106. struct mlx4_spec_list *spec_l2,
  1107. unsigned char *mac)
  1108. {
  1109. int err = 0;
  1110. __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
  1111. spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
  1112. memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
  1113. memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
  1114. if ((cmd->fs.flow_type & FLOW_EXT) &&
  1115. (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
  1116. spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
  1117. spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
  1118. }
  1119. list_add_tail(&spec_l2->list, rule_list_h);
  1120. return err;
  1121. }
  1122. static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
  1123. struct ethtool_rxnfc *cmd,
  1124. struct list_head *rule_list_h,
  1125. struct mlx4_spec_list *spec_l2,
  1126. __be32 ipv4_dst)
  1127. {
  1128. #ifdef CONFIG_INET
  1129. unsigned char mac[ETH_ALEN];
  1130. if (!ipv4_is_multicast(ipv4_dst)) {
  1131. if (cmd->fs.flow_type & FLOW_MAC_EXT)
  1132. memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
  1133. else
  1134. memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
  1135. } else {
  1136. ip_eth_mc_map(ipv4_dst, mac);
  1137. }
  1138. return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
  1139. #else
  1140. return -EINVAL;
  1141. #endif
  1142. }
/* Build the spec list for an IP_USER_FLOW rule: an L2 (MAC) spec
 * derived from the destination IP plus an IPv4 src/dst spec, appended
 * to @list_h.  On failure all specs allocated here are freed (the MAC
 * helper only fails before spec_l2 is linked into the list).
 * Returns 0 or a negative errno.
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	/* a non-zero mask was validated to mean exact match */
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}
/* Build the spec list for a TCP_V4_FLOW/UDP_V4_FLOW rule (@proto
 * selects which): an L2 (MAC) spec derived from the destination IP,
 * an IPv4 src/dst spec and a TCP/UDP port spec, appended to @list_h.
 * On failure all specs allocated here are freed.  Returns 0 or a
 * negative errno.
 */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	/* non-zero masks were validated to mean exact match */
	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}
/* Validate @cmd and translate it into a list of mlx4 net-trans rule
 * specs on @rule_list_h.  Returns 0 on success or a negative errno
 * (including any validation failure).
 */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		/* add_mac_rule always succeeds and links spec_l2 */
		eth_spec = &cmd->fs.h_u.ether_spec;
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}
	/* no default case: validate_flow already rejected other types */

	return err;
}
/* Install (or replace) the ethtool flow rule at cmd->fs.location:
 * resolve the destination QP from the ring cookie, build the spec
 * list, detach any rule already occupying the slot, attach the new
 * rule and record it in priv->ethtool_rules.
 */
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	/* the slot index doubles as priority within the ethtool domain */
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		/* otherwise the cookie is an RX ring index */
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	/* replace semantics: drop whatever currently occupies the slot */
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	/* remember the rule so it can be listed and detached later */
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	/* the spec entries were only needed to program the HW rule */
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}
  1340. static int mlx4_en_flow_detach(struct net_device *dev,
  1341. struct ethtool_rxnfc *cmd)
  1342. {
  1343. int err = 0;
  1344. struct ethtool_flow_id *rule;
  1345. struct mlx4_en_priv *priv = netdev_priv(dev);
  1346. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1347. return -EINVAL;
  1348. rule = &priv->ethtool_rules[cmd->fs.location];
  1349. if (!rule->id) {
  1350. err = -ENOENT;
  1351. goto out;
  1352. }
  1353. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1354. if (err) {
  1355. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1356. cmd->fs.location, rule->id);
  1357. goto out;
  1358. }
  1359. rule->id = 0;
  1360. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1361. list_del(&rule->list);
  1362. out:
  1363. return err;
  1364. }
  1365. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1366. int loc)
  1367. {
  1368. int err = 0;
  1369. struct ethtool_flow_id *rule;
  1370. struct mlx4_en_priv *priv = netdev_priv(dev);
  1371. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1372. return -EINVAL;
  1373. rule = &priv->ethtool_rules[loc];
  1374. if (rule->id)
  1375. memcpy(&cmd->fs, &rule->flow_spec,
  1376. sizeof(struct ethtool_rx_flow_spec));
  1377. else
  1378. err = -ENOENT;
  1379. return err;
  1380. }
  1381. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1382. {
  1383. int i, res = 0;
  1384. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1385. if (priv->ethtool_rules[i].id)
  1386. res++;
  1387. }
  1388. return res;
  1389. }
  1390. static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1391. u32 *rule_locs)
  1392. {
  1393. struct mlx4_en_priv *priv = netdev_priv(dev);
  1394. struct mlx4_en_dev *mdev = priv->mdev;
  1395. int err = 0;
  1396. int i = 0, priority = 0;
  1397. if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
  1398. cmd->cmd == ETHTOOL_GRXCLSRULE ||
  1399. cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
  1400. (mdev->dev->caps.steering_mode !=
  1401. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
  1402. return -EINVAL;
  1403. switch (cmd->cmd) {
  1404. case ETHTOOL_GRXRINGS:
  1405. cmd->data = priv->rx_ring_num;
  1406. break;
  1407. case ETHTOOL_GRXCLSRLCNT:
  1408. cmd->rule_cnt = mlx4_en_get_num_flows(priv);
  1409. break;
  1410. case ETHTOOL_GRXCLSRULE:
  1411. err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
  1412. break;
  1413. case ETHTOOL_GRXCLSRLALL:
  1414. while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
  1415. err = mlx4_en_get_flow(dev, cmd, i);
  1416. if (!err)
  1417. rule_locs[priority++] = i;
  1418. i++;
  1419. }
  1420. err = 0;
  1421. break;
  1422. default:
  1423. err = -EOPNOTSUPP;
  1424. break;
  1425. }
  1426. return err;
  1427. }
  1428. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1429. {
  1430. int err = 0;
  1431. struct mlx4_en_priv *priv = netdev_priv(dev);
  1432. struct mlx4_en_dev *mdev = priv->mdev;
  1433. if (mdev->dev->caps.steering_mode !=
  1434. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1435. return -EINVAL;
  1436. switch (cmd->cmd) {
  1437. case ETHTOOL_SRXCLSRLINS:
  1438. err = mlx4_en_flow_replace(dev, cmd);
  1439. break;
  1440. case ETHTOOL_SRXCLSRLDEL:
  1441. err = mlx4_en_flow_detach(dev, cmd);
  1442. break;
  1443. default:
  1444. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1445. return -EINVAL;
  1446. }
  1447. return err;
  1448. }
  1449. static void mlx4_en_get_channels(struct net_device *dev,
  1450. struct ethtool_channels *channel)
  1451. {
  1452. struct mlx4_en_priv *priv = netdev_priv(dev);
  1453. memset(channel, 0, sizeof(*channel));
  1454. channel->max_rx = MAX_RX_RINGS;
  1455. channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
  1456. channel->rx_count = priv->rx_ring_num;
  1457. channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
  1458. }
/* ethtool .set_channels: change the number of RX rings and of TX
 * rings per user priority.  Like ring resizing this frees and
 * reallocates all port resources, briefly stopping a running port.
 */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	/* only separate rx/tx channels within driver limits are valid */
	if (channel->other_count || channel->combined_count ||
	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
	    channel->rx_count > MAX_RX_RINGS ||
	    !channel->tx_count || !channel->rx_count)
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->num_tx_rings_p_up = channel->tx_count;
	/* total TX rings = rings per UP times number of UPs */
	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
	priv->rx_ring_num = channel->rx_count;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	/* refresh traffic-class mapping if TC is configured */
	if (dev->num_tc)
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* re-apply interrupt moderation to the new rings */
	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
  1501. static int mlx4_en_get_ts_info(struct net_device *dev,
  1502. struct ethtool_ts_info *info)
  1503. {
  1504. struct mlx4_en_priv *priv = netdev_priv(dev);
  1505. struct mlx4_en_dev *mdev = priv->mdev;
  1506. int ret;
  1507. ret = ethtool_op_get_ts_info(dev, info);
  1508. if (ret)
  1509. return ret;
  1510. if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
  1511. info->so_timestamping |=
  1512. SOF_TIMESTAMPING_TX_HARDWARE |
  1513. SOF_TIMESTAMPING_RX_HARDWARE |
  1514. SOF_TIMESTAMPING_RAW_HARDWARE;
  1515. info->tx_types =
  1516. (1 << HWTSTAMP_TX_OFF) |
  1517. (1 << HWTSTAMP_TX_ON);
  1518. info->rx_filters =
  1519. (1 << HWTSTAMP_FILTER_NONE) |
  1520. (1 << HWTSTAMP_FILTER_ALL);
  1521. if (mdev->ptp_clock)
  1522. info->phc_index = ptp_clock_index(mdev->ptp_clock);
  1523. }
  1524. return ret;
  1525. }
  1526. static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
  1527. {
  1528. struct mlx4_en_priv *priv = netdev_priv(dev);
  1529. struct mlx4_en_dev *mdev = priv->mdev;
  1530. bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1531. bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1532. bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
  1533. bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
  1534. int i;
  1535. int ret = 0;
  1536. if (bf_enabled_new != bf_enabled_old) {
  1537. if (bf_enabled_new) {
  1538. bool bf_supported = true;
  1539. for (i = 0; i < priv->tx_ring_num; i++)
  1540. bf_supported &= priv->tx_ring[i]->bf_alloced;
  1541. if (!bf_supported) {
  1542. en_err(priv, "BlueFlame is not supported\n");
  1543. return -EINVAL;
  1544. }
  1545. priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1546. } else {
  1547. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1548. }
  1549. for (i = 0; i < priv->tx_ring_num; i++)
  1550. priv->tx_ring[i]->bf_enabled = bf_enabled_new;
  1551. en_info(priv, "BlueFlame %s\n",
  1552. bf_enabled_new ? "Enabled" : "Disabled");
  1553. }
  1554. if (phv_enabled_new != phv_enabled_old) {
  1555. ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
  1556. if (ret)
  1557. return ret;
  1558. else if (phv_enabled_new)
  1559. priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
  1560. else
  1561. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
  1562. en_info(priv, "PHV bit %s\n",
  1563. phv_enabled_new ? "Enabled" : "Disabled");
  1564. }
  1565. return 0;
  1566. }
  1567. static u32 mlx4_en_get_priv_flags(struct net_device *dev)
  1568. {
  1569. struct mlx4_en_priv *priv = netdev_priv(dev);
  1570. return priv->pflags;
  1571. }
  1572. static int mlx4_en_get_tunable(struct net_device *dev,
  1573. const struct ethtool_tunable *tuna,
  1574. void *data)
  1575. {
  1576. const struct mlx4_en_priv *priv = netdev_priv(dev);
  1577. int ret = 0;
  1578. switch (tuna->id) {
  1579. case ETHTOOL_TX_COPYBREAK:
  1580. *(u32 *)data = priv->prof->inline_thold;
  1581. break;
  1582. default:
  1583. ret = -EINVAL;
  1584. break;
  1585. }
  1586. return ret;
  1587. }
  1588. static int mlx4_en_set_tunable(struct net_device *dev,
  1589. const struct ethtool_tunable *tuna,
  1590. const void *data)
  1591. {
  1592. struct mlx4_en_priv *priv = netdev_priv(dev);
  1593. int val, ret = 0;
  1594. switch (tuna->id) {
  1595. case ETHTOOL_TX_COPYBREAK:
  1596. val = *(u32 *)data;
  1597. if (val < MIN_PKT_LEN || val > MAX_INLINE)
  1598. ret = -EINVAL;
  1599. else
  1600. priv->prof->inline_thold = val;
  1601. break;
  1602. default:
  1603. ret = -EINVAL;
  1604. break;
  1605. }
  1606. return ret;
  1607. }
  1608. static int mlx4_en_get_module_info(struct net_device *dev,
  1609. struct ethtool_modinfo *modinfo)
  1610. {
  1611. struct mlx4_en_priv *priv = netdev_priv(dev);
  1612. struct mlx4_en_dev *mdev = priv->mdev;
  1613. int ret;
  1614. u8 data[4];
  1615. /* Read first 2 bytes to get Module & REV ID */
  1616. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1617. 0/*offset*/, 2/*size*/, data);
  1618. if (ret < 2)
  1619. return -EIO;
  1620. switch (data[0] /* identifier */) {
  1621. case MLX4_MODULE_ID_QSFP:
  1622. modinfo->type = ETH_MODULE_SFF_8436;
  1623. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1624. break;
  1625. case MLX4_MODULE_ID_QSFP_PLUS:
  1626. if (data[1] >= 0x3) { /* revision id */
  1627. modinfo->type = ETH_MODULE_SFF_8636;
  1628. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1629. } else {
  1630. modinfo->type = ETH_MODULE_SFF_8436;
  1631. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1632. }
  1633. break;
  1634. case MLX4_MODULE_ID_QSFP28:
  1635. modinfo->type = ETH_MODULE_SFF_8636;
  1636. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1637. break;
  1638. case MLX4_MODULE_ID_SFP:
  1639. modinfo->type = ETH_MODULE_SFF_8472;
  1640. modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
  1641. break;
  1642. default:
  1643. return -ENOSYS;
  1644. }
  1645. return 0;
  1646. }
  1647. static int mlx4_en_get_module_eeprom(struct net_device *dev,
  1648. struct ethtool_eeprom *ee,
  1649. u8 *data)
  1650. {
  1651. struct mlx4_en_priv *priv = netdev_priv(dev);
  1652. struct mlx4_en_dev *mdev = priv->mdev;
  1653. int offset = ee->offset;
  1654. int i = 0, ret;
  1655. if (ee->len == 0)
  1656. return -EINVAL;
  1657. memset(data, 0, ee->len);
  1658. while (i < ee->len) {
  1659. en_dbg(DRV, priv,
  1660. "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
  1661. i, offset, ee->len - i);
  1662. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1663. offset, ee->len - i, data + i);
  1664. if (!ret) /* Done reading */
  1665. return 0;
  1666. if (ret < 0) {
  1667. en_err(priv,
  1668. "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
  1669. i, offset, ee->len - i, ret);
  1670. return 0;
  1671. }
  1672. i += ret;
  1673. offset += ret;
  1674. }
  1675. return 0;
  1676. }
  1677. static int mlx4_en_set_phys_id(struct net_device *dev,
  1678. enum ethtool_phys_id_state state)
  1679. {
  1680. int err;
  1681. u16 beacon_duration;
  1682. struct mlx4_en_priv *priv = netdev_priv(dev);
  1683. struct mlx4_en_dev *mdev = priv->mdev;
  1684. if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
  1685. return -EOPNOTSUPP;
  1686. switch (state) {
  1687. case ETHTOOL_ID_ACTIVE:
  1688. beacon_duration = PORT_BEACON_MAX_LIMIT;
  1689. break;
  1690. case ETHTOOL_ID_INACTIVE:
  1691. beacon_duration = 0;
  1692. break;
  1693. default:
  1694. return -EOPNOTSUPP;
  1695. }
  1696. err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
  1697. return err;
  1698. }
/* ethtool operations table for mlx4 EN netdevices; wired up by the
 * netdev setup code. Callbacks not listed here fall back to the
 * ethtool core defaults.
 */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_link_ksettings = mlx4_en_get_link_ksettings,
	.set_link_ksettings = mlx4_en_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};