/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
#include "mlx4_en.h"
#include "en_port.h"
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)
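/* Push the current interrupt moderation settings to every TX and RX CQ.
 * RX CQs are left to the adaptive moderation logic when it is enabled;
 * hardware is only updated while the port is up.
 */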
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
	int i;
	int err = 0;
	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
		priv->tx_cq[i]->moder_time = priv->tx_usecs;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
			if (err)
				return err;
		}
	}
	if (priv->adaptive_rx_coal)
		return 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
		priv->rx_cq[i]->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
			if (err)
				return err;
		}
	}
	return err;
}
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
};
static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
#define NUM_MAIN_STATS	21
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}
static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}
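/* Wake-on-LAN: only magic-packet wake is supported, and only on physical
 * ports 1 and 2 when the matching WOL capability flag is set. The current
 * configuration is read and written through the mlx4_wol_read() and
 * mlx4_wol_write() firmware commands.
 */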
static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;
	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}
	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;
	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}
	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}
	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}
static int mlx4_en_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;
	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;
	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;
	if (!(priv->mdev->dev->caps.flags & mask))
		return -EOPNOTSUPP;
	if (wol->supported & ~WAKE_MAGIC)
		return -EINVAL;
	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL info, unable to modify\n");
		return err;
	}
	if (wol->wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
			MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}
	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
	if (err)
		en_err(priv, "Failed to set WoL information\n");
	return err;
}
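/* Number of entries exported for each ethtool string set. For ETH_SS_STATS
 * this is either the number of bits set in stats_bitmap or the full
 * NUM_ALL_STATS, plus two per-ring counters for every TX ring and two
 * (five with busy-polling) for every RX ring. The last two self-tests are
 * dropped when unicast loopback is not supported.
 */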
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int bit_count = hweight64(priv->stats_bitmap);
	switch (sset) {
	case ETH_SS_STATS:
		return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
			(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
			(priv->rx_ring_num * 5);
#else
			(priv->rx_ring_num * 2);
#endif
	case ETH_SS_TEST:
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(mlx4_en_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}
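/* Copy the software counters out under stats_lock, in the same order as
 * main_strings. When stats_bitmap is non-zero only the selected main and
 * port counters are exported, followed in either case by the per-ring
 * TX/RX counters.
 */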
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, j = 0;
	spin_lock_bh(&priv->stats_lock);
	if (!(priv->stats_bitmap)) {
		for (i = 0; i < NUM_MAIN_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->stats)[i];
		for (i = 0; i < NUM_PORT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->port_stats)[i];
		for (i = 0; i < NUM_PKT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->pkstats)[i];
	} else {
		for (i = 0; i < NUM_MAIN_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->stats)[i];
			j++;
		}
		for (i = 0; i < NUM_PORT_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->port_stats)[i];
			j++;
		}
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[index++] = priv->rx_ring[i]->yields;
		data[index++] = priv->rx_ring[i]->misses;
		data[index++] = priv->rx_ring[i]->cleaned;
#endif
	}
	spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;
	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;
	case ETH_SS_STATS:
		/* Add main counters */
		if (!priv->stats_bitmap) {
			for (i = 0; i < NUM_MAIN_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i]);
			for (i = 0; i < NUM_PORT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i + NUM_MAIN_STATS]);
			for (i = 0; i < NUM_PKT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i + NUM_MAIN_STATS +
						    NUM_PORT_STATS]);
		} else
			for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
				if ((priv->stats_bitmap >> i) & 1) {
					strcpy(data +
					       (index++) * ETH_GSTRING_LEN,
					       main_strings[i]);
				}
				if (!(priv->stats_bitmap >> i))
					break;
			}
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
#ifdef CONFIG_NET_RX_BUSY_POLL
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_napi_yield", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_misses", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_cleaned", i);
#endif
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;
	}
}
static u32 mlx4_en_autoneg_get(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 autoneg = AUTONEG_DISABLE;
	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
	    (priv->port_state.flags & MLX4_EN_PORT_ANE))
		autoneg = AUTONEG_ENABLE;
	return autoneg;
}
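/* Derive the ethtool SUPPORTED_* port type (TP, fibre or backplane) and
 * the reported PORT_* connector from the PTYS protocol capability and
 * operational masks.
 */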
static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return SUPPORTED_TP;
	}
	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return SUPPORTED_FIBRE;
	}
	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return SUPPORTED_Backplane;
	}
	return 0;
}
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}
	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}
	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
		return PORT_DA;
	}
	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return PORT_NONE;
	}
	return PORT_OTHER;
}
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
	SPEED = 2
};
/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
	[MLX4_100BASE_TX] = {
		SUPPORTED_100baseT_Full,
		ADVERTISED_100baseT_Full,
		SPEED_100
		},
	[MLX4_1000BASE_T] = {
		SUPPORTED_1000baseT_Full,
		ADVERTISED_1000baseT_Full,
		SPEED_1000
		},
	[MLX4_1000BASE_CX_SGMII] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
		},
	[MLX4_1000BASE_KX] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
		},
	[MLX4_10GBASE_T] = {
		SUPPORTED_10000baseT_Full,
		ADVERTISED_10000baseT_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_CX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_KX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_KR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_CR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_SR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},
	[MLX4_20GBASE_KR2] = {
		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
		SPEED_20000
		},
	[MLX4_40GBASE_CR4] = {
		SUPPORTED_40000baseCR4_Full,
		ADVERTISED_40000baseCR4_Full,
		SPEED_40000
		},
	[MLX4_40GBASE_KR4] = {
		SUPPORTED_40000baseKR4_Full,
		ADVERTISED_40000baseKR4_Full,
		SPEED_40000
		},
	[MLX4_40GBASE_SR4] = {
		SUPPORTED_40000baseSR4_Full,
		ADVERTISED_40000baseSR4_Full,
		SPEED_40000
		},
	[MLX4_56GBASE_KR4] = {
		SUPPORTED_56000baseKR4_Full,
		ADVERTISED_56000baseKR4_Full,
		SPEED_56000
		},
	[MLX4_56GBASE_CR4] = {
		SUPPORTED_56000baseCR4_Full,
		ADVERTISED_56000baseCR4_Full,
		SPEED_56000
		},
	[MLX4_56GBASE_SR4] = {
		SUPPORTED_56000baseSR4_Full,
		ADVERTISED_56000baseSR4_Full,
		SPEED_56000
		},
};
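/* Helpers that walk ptys2ethtool_map to convert between mlx4 protocol
 * bitmasks, ethtool link-mode masks and SPEED_* values.
 */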
static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
{
	int i;
	u32 link_modes = 0;
	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (eth_proto & MLX4_PROT_MASK(i))
			link_modes |= ptys2ethtool_map[i][report];
	}
	return link_modes;
}
static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
{
	int i;
	u32 ptys_modes = 0;
	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][report] & link_modes)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}
/* Convert actual speed (SPEED_XXX) to ptys link modes */
static u32 speed2ptys_link_modes(u32 speed)
{
	int i;
	u32 ptys_modes = 0;
	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][SPEED] == speed)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}
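/* Fill an ethtool_cmd from a PTYS register query: supported/advertised
 * link modes come from eth_proto_cap/eth_proto_admin, the link partner
 * advertisement from eth_proto_lp_adv, and pause/autoneg bits from the
 * port profile and port state.
 */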
static int ethtool_get_ptys_settings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;
	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));
	cmd->supported = 0;
	cmd->advertising = 0;
	cmd->supported |= ptys_get_supported_port(&ptys_reg);
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
		ADVERTISED_Asym_Pause : 0;
	cmd->port = ptys_get_active_port(&ptys_reg);
	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;
	if (mlx4_en_autoneg_get(dev)) {
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
	}
	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		ADVERTISED_Autoneg : 0;
	cmd->phy_address = 0;
	cmd->mdio_support = 0;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	return ret;
}
static void ethtool_get_default_settings(struct net_device *dev,
					 struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	trans_type = priv->port_state.transceiver;
	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->port = -1;
		cmd->transceiver = -1;
	}
}
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;
	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);
	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_settings(dev, cmd);
	if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
		ethtool_get_default_settings(dev, cmd);
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}
/* Calculate PTYS admin according to ethtool speed (SPEED_XXX) */
static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
				   __be32 proto_cap)
{
	__be32 proto_admin = 0;
	if (!speed) { /* Speed = 0 ==> Reset Link modes */
		proto_admin = proto_cap;
		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
			be32_to_cpu(proto_cap));
	} else {
		u32 ptys_link_modes = speed2ptys_link_modes(speed);
		proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
		en_info(priv, "Setting Speed to %d\n", speed);
	}
	return proto_admin;
}
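/* Apply new link settings: query PTYS, compute the new eth_proto_admin
 * mask from the advertised modes (or from the requested speed when it
 * changed), write it back, and restart the port if anything changed.
 */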
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	__be32 proto_admin;
	int ret;
	u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
	int speed = ethtool_cmd_speed(cmd);
	en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
	       speed, cmd->advertising, cmd->autoneg, cmd->duplex);
	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
	    (cmd->duplex == DUPLEX_HALF))
		return -EINVAL;
	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
			ret);
		return 0;
	}
	proto_admin = cpu_to_be32(ptys_adv);
	if (speed >= 0 && speed != priv->port_state.link_speed)
		/* If speed was set then speed decides :-) */
		proto_admin = speed_set_ptys_admin(priv, speed,
						   ptys_reg.eth_proto_cap);
	proto_admin &= ptys_reg.eth_proto_cap;
	if (proto_admin == ptys_reg.eth_proto_admin)
		return 0; /* Nothing to change */
	if (!proto_admin) {
		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
		return -EINVAL; /* nothing to change due to bad input */
	}
	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
	       be32_to_cpu(proto_admin));
	ptys_reg.eth_proto_admin = proto_admin;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
				   &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
		return ret;
	}
	en_warn(priv, "Port link mode changed, restarting port...\n");
	mutex_lock(&priv->mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&priv->mdev->state_lock);
	return 0;
}
static int mlx4_en_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;
	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
	return 0;
}
static int mlx4_en_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	if (!coal->tx_max_coalesced_frames_irq)
		return -EINVAL;
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;
	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}
	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
	return mlx4_en_moderation_update(priv);
}
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	if (pause->autoneg)
		return -EINVAL;
	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");
	return err;
}
static void mlx4_en_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}
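/* Resizing the rings requires re-allocating all port resources: the port
 * is stopped if it was up, the ring sizes in the port profile are updated,
 * resources are re-allocated and the port is restarted. A similar
 * stop/reconfigure/restart pattern is used below for RSS and channel
 * changes.
 */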
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;
	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;
	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}
	mlx4_en_free_resources(priv);
	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}
	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
	param->tx_pending = priv->tx_ring[0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	return priv->rx_ring_num;
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
	return MLX4_EN_RSS_KEY_SIZE;
}
static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	/* check if requested function is supported by the device */
	if ((hfunc == ETH_RSS_HASH_TOP &&
	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
	    (hfunc == ETH_RSS_HASH_XOR &&
	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
		return -EINVAL;
	priv->rss_hash_fn = hfunc;
	if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
		en_warn(priv,
			"Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
	if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
		en_warn(priv,
			"Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
	return 0;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
			    u8 *hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;
	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
	rss_rings = 1 << ilog2(rss_rings);
	while (n--) {
		if (!ring_index)
			break;
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}
	if (key)
		memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc)
		*hfunc = priv->rss_hash_fn;
	return err;
}
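/* Accept a new RSS configuration. The indirection table is only honoured
 * if it spreads flows round-robin over the first rss_rings rings, and
 * rss_rings must be a power of two; the port is restarted if it was up.
 */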
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;
	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (!ring_index)
			continue;
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;
		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}
	if (!rss_rings)
		rss_rings = priv->rx_ring_num;
	/* RSS table size must be a power of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;
	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}
	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}
	mutex_unlock(&mdev->state_lock);
	return err;
}
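/* Sanity-check an ethtool flow-steering spec before translating it:
 * L3/L4 field masks must be either all zeros or all ones, destination
 * MAC masks must be all ones, source MAC masks must be zero, and a VLAN
 * match may only use the full VID mask.
 */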
#define all_zeros_or_all_ones(field) \
	((field) == 0 || (field) == (__force typeof(field))-1)
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;
	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;
	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}
	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;
		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	if ((cmd->fs.flow_type & FLOW_EXT)) {
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
			cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;
		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}
	return 0;
}
static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
					struct list_head *rule_list_h,
					struct mlx4_spec_list *spec_l2,
					unsigned char *mac)
{
	int err = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
	if ((cmd->fs.flow_type & FLOW_EXT) &&
	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
	}
	list_add_tail(&spec_l2->list, rule_list_h);
	return err;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];
	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}
	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}
	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;
	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);
	return 0;
free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}
	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	if (proto == TCP_V4_FLOW) {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}
	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);
	return 0;
free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;
	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;
		eth_spec = &cmd->fs.h_u.ether_spec;
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}
	return err;
}
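/* Insert or replace the flow-steering rule at cmd->fs.location: build an
 * mlx4_net_trans_rule from the ethtool spec, pick the destination QP
 * (drop QP for RX_CLS_FLOW_DISC, a raw QP number when the
 * EN_ETHTOOL_QP_ATTACH bit is set in the ring cookie, otherwise the RSS
 * QP of the requested ring), detach any rule already stored at that
 * location and attach the new one.
 */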
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};
	rule.port = priv->port;
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);
	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);
out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}
  1227. static int mlx4_en_flow_detach(struct net_device *dev,
  1228. struct ethtool_rxnfc *cmd)
  1229. {
  1230. int err = 0;
  1231. struct ethtool_flow_id *rule;
  1232. struct mlx4_en_priv *priv = netdev_priv(dev);
  1233. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1234. return -EINVAL;
  1235. rule = &priv->ethtool_rules[cmd->fs.location];
  1236. if (!rule->id) {
  1237. err = -ENOENT;
  1238. goto out;
  1239. }
  1240. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1241. if (err) {
  1242. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1243. cmd->fs.location, rule->id);
  1244. goto out;
  1245. }
  1246. rule->id = 0;
  1247. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1248. list_del(&rule->list);
  1249. out:
  1250. return err;
  1251. }
  1252. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1253. int loc)
  1254. {
  1255. int err = 0;
  1256. struct ethtool_flow_id *rule;
  1257. struct mlx4_en_priv *priv = netdev_priv(dev);
  1258. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1259. return -EINVAL;
  1260. rule = &priv->ethtool_rules[loc];
  1261. if (rule->id)
  1262. memcpy(&cmd->fs, &rule->flow_spec,
  1263. sizeof(struct ethtool_rx_flow_spec));
  1264. else
  1265. err = -ENOENT;
  1266. return err;
  1267. }
  1268. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1269. {
  1270. int i, res = 0;
  1271. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1272. if (priv->ethtool_rules[i].id)
  1273. res++;
  1274. }
  1275. return res;
  1276. }
  1277. static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1278. u32 *rule_locs)
  1279. {
  1280. struct mlx4_en_priv *priv = netdev_priv(dev);
  1281. struct mlx4_en_dev *mdev = priv->mdev;
  1282. int err = 0;
  1283. int i = 0, priority = 0;
  1284. if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
  1285. cmd->cmd == ETHTOOL_GRXCLSRULE ||
  1286. cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
  1287. (mdev->dev->caps.steering_mode !=
  1288. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
  1289. return -EINVAL;
  1290. switch (cmd->cmd) {
  1291. case ETHTOOL_GRXRINGS:
  1292. cmd->data = priv->rx_ring_num;
  1293. break;
  1294. case ETHTOOL_GRXCLSRLCNT:
  1295. cmd->rule_cnt = mlx4_en_get_num_flows(priv);
  1296. break;
  1297. case ETHTOOL_GRXCLSRULE:
  1298. err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
  1299. break;
  1300. case ETHTOOL_GRXCLSRLALL:
  1301. while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
  1302. err = mlx4_en_get_flow(dev, cmd, i);
  1303. if (!err)
  1304. rule_locs[priority++] = i;
  1305. i++;
  1306. }
  1307. err = 0;
  1308. break;
  1309. default:
  1310. err = -EOPNOTSUPP;
  1311. break;
  1312. }
  1313. return err;
  1314. }
  1315. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1316. {
  1317. int err = 0;
  1318. struct mlx4_en_priv *priv = netdev_priv(dev);
  1319. struct mlx4_en_dev *mdev = priv->mdev;
  1320. if (mdev->dev->caps.steering_mode !=
  1321. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1322. return -EINVAL;
  1323. switch (cmd->cmd) {
  1324. case ETHTOOL_SRXCLSRLINS:
  1325. err = mlx4_en_flow_replace(dev, cmd);
  1326. break;
  1327. case ETHTOOL_SRXCLSRLDEL:
  1328. err = mlx4_en_flow_detach(dev, cmd);
  1329. break;
  1330. default:
  1331. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1332. return -EINVAL;
  1333. }
  1334. return err;
  1335. }
  1336. static void mlx4_en_get_channels(struct net_device *dev,
  1337. struct ethtool_channels *channel)
  1338. {
  1339. struct mlx4_en_priv *priv = netdev_priv(dev);
  1340. memset(channel, 0, sizeof(*channel));
  1341. channel->max_rx = MAX_RX_RINGS;
  1342. channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
  1343. channel->rx_count = priv->rx_ring_num;
  1344. channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
  1345. }
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (channel->other_count || channel->combined_count ||
	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
	    channel->rx_count > MAX_RX_RINGS ||
	    !channel->tx_count || !channel->rx_count)
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->num_tx_rings_p_up = channel->tx_count;
	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
	priv->rx_ring_num = channel->rx_count;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	if (dev->num_tc)
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
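
/*
 * Report timestamping capabilities (ETHTOOL_GET_TS_INFO, e.g.
 * "ethtool -T ethX"). On HCAs that advertise the timestamping capability
 * this adds the HW TX/RX timestamping modes and exposes the PTP clock
 * index when a PHC has been registered.
 */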
static int mlx4_en_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;

	ret = ethtool_op_get_ts_info(dev, info);
	if (ret)
		return ret;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
		info->so_timestamping |=
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_ALL);

		if (mdev->ptp_clock)
			info->phc_index = ptp_clock_index(mdev->ptp_clock);
	}

	return ret;
}
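
/*
 * Toggle driver-private flags (ETHTOOL_SPFLAGS, e.g. via
 * "ethtool --set-priv-flags ethX ..."). Only the BlueFlame flag is
 * handled here: it can be enabled only if every TX ring has a BlueFlame
 * register allocated, and the new state is propagated to all TX rings.
 */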
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	int i;

	if (bf_enabled_new == bf_enabled_old)
		return 0; /* Nothing to do */

	if (bf_enabled_new) {
		bool bf_supported = true;

		for (i = 0; i < priv->tx_ring_num; i++)
			bf_supported &= priv->tx_ring[i]->bf_alloced;

		if (!bf_supported) {
			en_err(priv, "BlueFlame is not supported\n");
			return -EINVAL;
		}

		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	} else {
		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	}

	for (i = 0; i < priv->tx_ring_num; i++)
		priv->tx_ring[i]->bf_enabled = bf_enabled_new;

	en_info(priv, "BlueFlame %s\n",
		bf_enabled_new ? "Enabled" : "Disabled");

	return 0;
}
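
/* Return the current driver-private flags bitmap (ETHTOOL_GPFLAGS). */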
static u32 mlx4_en_get_priv_flags(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->pflags;
}
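
/*
 * Read a driver tunable (ETHTOOL_GTUNABLE). Only ETHTOOL_TX_COPYBREAK is
 * supported; it reports the TX inline threshold, i.e. the packet size up
 * to which data is inlined into the TX descriptor.
 */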
static int mlx4_en_get_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna,
			       void *data)
{
	const struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		*(u32 *)data = priv->prof->inline_thold;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
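
/*
 * Update a driver tunable (ETHTOOL_STUNABLE). For ETHTOOL_TX_COPYBREAK
 * the new threshold must lie within [MIN_PKT_LEN, MAX_INLINE].
 */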
static int mlx4_en_set_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna,
			       const void *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int val, ret = 0;

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		val = *(u32 *)data;
		if (val < MIN_PKT_LEN || val > MAX_INLINE)
			ret = -EINVAL;
		else
			priv->prof->inline_thold = val;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
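
/*
 * Identify the plugged module and the EEPROM layout it uses
 * (ETHTOOL_GMODULEINFO, e.g. "ethtool -m ethX"). The identifier and
 * revision bytes read from the module select between the SFF-8436,
 * SFF-8636 and SFF-8472 formats.
 */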
static int mlx4_en_get_module_info(struct net_device *dev,
				   struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
				   0/*offset*/, 2/*size*/, data);
	if (ret < 2)
		return -EIO;

	switch (data[0] /* identifier */) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		if (data[1] >= 0x3) { /* revision id */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -ENOSYS;
	}

	return 0;
}
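
/*
 * Dump the module EEPROM contents (ETHTOOL_GMODULEEEPROM, also driven by
 * "ethtool -m ethX"). mlx4_get_module_info() may return fewer bytes than
 * requested, so the read loops until the requested length is filled or
 * the firmware signals completion or an error.
 */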
static int mlx4_en_get_module_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		       i, offset, ee->len - i);

		ret = mlx4_get_module_info(mdev->dev, priv->port,
					   offset, ee->len - i, data + i);

		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			return 0;
		}

		i += ret;
		offset += ret;
	}

	return 0;
}
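
/*
 * Ethtool operations exported by the mlx4 Ethernet driver; the networking
 * core dispatches the corresponding ETHTOOL_* requests to these handlers.
 */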
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};