/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"

struct efx_sw_stat_desc {
	const char *name;
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned offset;
	u64 (*get_stat)(void *field); /* Reader function */
};

/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) {				\
	.name = #stat_name,						\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
	.offset = ((((field_type *) 0) ==				\
		    &((struct efx_##source_name *)0)->field) ?		\
		   offsetof(struct efx_##source_name, field) :		\
		   offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,					\
}
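
/* The ternary in .offset above is the type check: comparing a null
 * (field_type *) against the address of the named field only compiles
 * cleanly if the field really has type field_type.  Both branches evaluate
 * to the same offsetof(), so the generated initialiser is unchanged.
 */
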
static u64 efx_get_uint_stat(void *field)
{
	return *(unsigned int *)field;
}

static u64 efx_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}

#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)

#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)

#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)

static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};

#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB

/**************************************************************************
 *
 * Ethtool operations
 *
 **************************************************************************
 */

/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev,
			       enum ethtool_phys_id_state state)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	enum efx_led_mode mode = EFX_LED_DEFAULT;

	switch (state) {
	case ETHTOOL_ID_ON:
		mode = EFX_LED_ON;
		break;
	case ETHTOOL_ID_OFF:
		mode = EFX_LED_OFF;
		break;
	case ETHTOOL_ID_INACTIVE:
		mode = EFX_LED_DEFAULT;
		break;
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */
	}

	efx->type->set_id_led(efx, mode);
	return 0;
}

/* This must be called with rtnl_lock held. */
static int
efx_ethtool_get_link_ksettings(struct net_device *net_dev,
			       struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_link_state *link_state = &efx->link_state;
	u32 supported;

	mutex_lock(&efx->mac_lock);
	efx->phy_op->get_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);

	/* Both MACs support pause frames (bidirectional and respond-only) */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	if (LOOPBACK_INTERNAL(efx)) {
		cmd->base.speed = link_state->speed;
		cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}

/* This must be called with rtnl_lock held. */
static int
efx_ethtool_set_link_ksettings(struct net_device *net_dev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((cmd->base.speed == SPEED_1000) &&
	    (cmd->base.duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	efx_mcdi_print_fwver(efx, info->fw_version,
			     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}

static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}

static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}

/**
 * efx_fill_test - fill in an individual self-test entry
 * @test_index: Index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 * @test: Pointer to test result (used only if data != %NULL)
 * @unit_format: Unit name format (e.g. "chan\%d")
 * @unit_id: Unit id (e.g. 0 for "chan0")
 * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
 * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
 *
 * Fill in an individual self-test entry.
 */
static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
			  int *test, const char *unit_format, int unit_id,
			  const char *test_format, const char *test_id)
{
	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];

	/* Fill data value, if applicable */
	if (data)
		data[test_index] = *test;

	/* Fill string, if applicable */
	if (strings) {
		if (strchr(unit_format, '%'))
			snprintf(unit_str, sizeof(unit_str),
				 unit_format, unit_id);
		else
			strcpy(unit_str, unit_format);
		snprintf(test_str, sizeof(test_str), test_format, test_id);
		snprintf(strings + test_index * ETH_GSTRING_LEN,
			 ETH_GSTRING_LEN,
			 "%-6s %-24s", unit_str, test_str);
	}
}

#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter)			\
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)

/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries.  Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}

/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	if (efx->phy_op->run_tests != NULL) {
		EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
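
/* Describe the per-queue statistics exposed via ethtool -S: one
 * "tx-<n>.tx_packets" entry per channel with TX queues and one
 * "rx-<n>.rx_packets" entry per channel with an RX queue.  Called with
 * strings == NULL just to count the entries.
 */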
static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_TXQ_TYPES);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	return n_stats;
}

static int efx_ethtool_get_sset_count(struct net_device *net_dev,
				      int string_set)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (string_set) {
	case ETH_SS_STATS:
		return efx->type->describe_stats(efx, NULL) +
		       EFX_ETHTOOL_SW_STAT_COUNT +
		       efx_describe_per_queue_stats(efx, NULL) +
		       efx_ptp_describe_stats(efx, NULL);
	case ETH_SS_TEST:
		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
	default:
		return -EINVAL;
	}
}

static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}
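
/* Fill the ethtool stats buffer.  The layout must match the strings emitted
 * by efx_ethtool_get_strings(): hardware/MAC stats first, then the software
 * stats from efx_sw_stat_desc[], then per-queue packet counts, then PTP
 * stats.
 */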
static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
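
/* Run the ethtool self-tests.  The interface is brought up temporarily if it
 * is currently down (RX buffers and interrupts are needed), and any failure
 * is reported by setting ETH_TEST_FL_FAILED in test->flags.
 */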
static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	bool already_up;
	int rc = -ENOMEM;

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}

/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return mdio45_nway_restart(&efx->mdio);
}

/*
 * Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event).  Unless the module parameter
 * separate_tx_channels is set, IRQs and moderation are therefore
 * shared between RX and TX completions.  In this case, when RX IRQ
 * moderation is explicitly changed then TX IRQ moderation is
 * automatically changed too, but otherwise we fail if the two values
 * are requested to be different.
 *
 * The hardware does not support a limit on the number of completions
 * before an IRQ, so we do not use the max_frames fields.  We should
 * report and require that max_frames == (usecs != 0), but this would
 * invalidate existing user documentation.
 *
 * The hardware does not have distinct settings for interrupt
 * moderation while the previous IRQ is being handled, so we should
 * not use the 'irq' fields.  However, an earlier developer
 * misunderstood the meaning of the 'irq' fields and the driver did
 * not support the standard fields.  To avoid invalidating existing
 * user documentation, we report and accept changes through either the
 * standard or 'irq' fields.  If both are changed at the same time, we
 * prefer the standard field.
 *
 * We implement adaptive IRQ moderation, but use a different algorithm
 * from that assumed in the definition of struct ethtool_coalesce.
 * Therefore we do not use any of the adaptive moderation parameters
 * in it.
 */
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int tx_usecs, rx_usecs;
	bool rx_adaptive;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);

	coalesce->tx_coalesce_usecs = tx_usecs;
	coalesce->tx_coalesce_usecs_irq = tx_usecs;
	coalesce->rx_coalesce_usecs = rx_usecs;
	coalesce->rx_coalesce_usecs_irq = rx_usecs;
	coalesce->use_adaptive_rx_coalesce = rx_adaptive;

	return 0;
}

static int efx_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its old value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	efx_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}

static void efx_ethtool_get_ringparam(struct net_device *net_dev,
				      struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
	ring->rx_pending = efx->rxq_entries;
	ring->tx_pending = efx->txq_entries;
}

static int efx_ethtool_set_ringparam(struct net_device *net_dev,
				     struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u32 txq_entries;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
		return -EINVAL;

	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
		netif_err(efx, drv, efx->net_dev,
			  "RX queues cannot be smaller than %u\n",
			  EFX_RXQ_MIN_ENT);
		return -EINVAL;
	}

	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
	if (txq_entries != ring->tx_pending)
		netif_warn(efx, drv, efx->net_dev,
			   "increasing TX queue size to minimum of %u\n",
			   txq_entries);

	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}

static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Hook for Falcon bug 11482 workaround */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	old_adv = efx->link_advertising[0];
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising[0] != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
				       struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}

static void efx_ethtool_get_wol(struct net_device *net_dev,
				struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->type->get_wol(efx, wol);
}

static int efx_ethtool_set_wol(struct net_device *net_dev,
			       struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->type->set_wol(efx, wol->wolopts);
}

static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx->type->map_reset_flags(flags);
	if (rc < 0)
		return rc;

	return efx_reset(efx, rc);
}

/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)

static inline void ip6_fill_mask(__be32 *mask)
{
	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
}
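
/* Translate an installed driver filter (struct efx_filter_spec) back into the
 * ethtool_rx_flow_spec representation reported by ETHTOOL_GRXCLSRULE.  Only
 * the filter forms that efx_ethtool_set_class_rule() can insert are handled;
 * anything else trips the WARN_ON below.
 */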
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule,
				      u32 *rss_context)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IPV6) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V6_FLOW : UDP_V6_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(ip6_entry->ip6dst, spec.loc_host,
			       sizeof(ip6_entry->ip6dst));
			ip6_fill_mask(ip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(ip6_entry->ip6src, spec.rem_host,
			       sizeof(ip6_entry->ip6src));
			ip6_fill_mask(ip6_mask->ip6src);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip6_entry->pdst = spec.loc_port;
			ip6_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip6_entry->psrc = spec.rem_port;
			ip6_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
		       EFX_FILTER_MATCH_OUTER_VID))) {
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IP) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV4_USER_FLOW;
		uip_entry->ip_ver = ETH_RX_NFC_IP4;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip_mask->proto = IP_PROTO_FULL_MASK;
			uip_entry->proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			uip_entry->ip4dst = spec.loc_host[0];
			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			uip_entry->ip4src = spec.rem_host[0];
			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV6_USER_FLOW;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
			uip6_entry->l4_proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(uip6_entry->ip6dst, spec.loc_host,
			       sizeof(uip6_entry->ip6dst));
			ip6_fill_mask(uip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(uip6_entry->ip6src, spec.rem_host,
			       sizeof(uip6_entry->ip6src));
			ip6_fill_mask(uip6_mask->ip6src);
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
		rule->flow_type |= FLOW_RSS;
		*rss_context = spec.rss_context;
	}

	return rc;
}
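
/* ethtool get_rxnfc handler.  The ETHTOOL_GRXFH case reports which packet
 * fields feed the RSS hash for each flow type; its inner switch relies on
 * deliberate fall-through, so UDP flows only include the L4 ports when the
 * RSS context has rx_hash_udp_4tuple set, while TCP flows always do and all
 * IP flows include the source/destination addresses.
 */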
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u32 rss_context = 0;
	s32 rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		struct efx_rss_context *ctx = &efx->rss_context;

		mutex_lock(&efx->rss_lock);
		if (info->flow_type & FLOW_RSS && info->rss_context) {
			ctx = efx_find_rss_context_entry(efx, info->rss_context);
			if (!ctx) {
				rc = -ENOENT;
				goto out_unlock;
			}
		}
		info->data = 0;
		if (!efx_rss_active(ctx)) /* No RSS */
			goto out_unlock;
		switch (info->flow_type & ~FLOW_RSS) {
		case UDP_V4_FLOW:
			if (ctx->rx_hash_udp_4tuple)
				/* fall through */
		case TCP_V4_FLOW:
				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if (ctx->rx_hash_udp_4tuple)
				/* fall through */
		case TCP_V6_FLOW:
				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			break;
		default:
			break;
		}
out_unlock:
		mutex_unlock(&efx->rss_lock);
		return rc;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (efx_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
		if (rc < 0)
			return rc;
		if (info->fs.flow_type & FLOW_RSS)
			info->rss_context = rss_context;
		return 0;

	case ETHTOOL_GRXCLSRLALL:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static inline bool ip6_mask_is_full(__be32 mask[4])
{
	return !~(mask[0] & mask[1] & mask[2] & mask[3]);
}

static inline bool ip6_mask_is_empty(__be32 mask[4])
{
	return !(mask[0] | mask[1] | mask[2] | mask[3]);
}
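
/* Validate an ethtool_rx_flow_spec from ETHTOOL_SRXCLSRLINS, translate it
 * into a struct efx_filter_spec and insert it.  Only exact-match (all-ones)
 * field masks are supported, and the hardware chooses the rule location.
 */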
static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule,
				      u32 rss_context)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	enum efx_filter_flags flags = 0;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	if (efx->rx_scatter)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	if (rule->flow_type & FLOW_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	if (rule->flow_type & FLOW_RSS)
		spec.rss_context = rss_context;

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IPV6);
		spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(ip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
			if (!ip6_mask_is_full(ip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (ip6_mask->pdst) {
			if (ip6_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip6_entry->pdst;
		}
		if (ip6_mask->psrc) {
			if (ip6_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip6_entry->psrc;
		}
		if (ip6_mask->tclass)
			return -EINVAL;
		break;

	case IPV4_USER_FLOW:
		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IP);
		if (uip_mask->ip4dst) {
			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = uip_entry->ip4dst;
		}
		if (uip_mask->ip4src) {
			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = uip_entry->ip4src;
		}
		if (uip_mask->proto) {
			if (uip_mask->proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip_entry->proto;
		}
		break;

	case IPV6_USER_FLOW:
		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IPV6);
		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(uip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
			if (!ip6_mask_is_full(uip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (uip6_mask->l4_proto) {
			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip6_entry->l4_proto;
		}
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}

static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
				 struct ethtool_rxnfc *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx_filter_get_rx_id_limit(efx) == 0)
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return efx_ethtool_set_class_rule(efx, &info->fs,
						  info->rss_context);

	case ETHTOOL_SRXCLSRLDEL:
		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
						 info->fs.location);

	default:
		return -EOPNOTSUPP;
	}
}

static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->n_rx_channels == 1)
		return 0;
	return ARRAY_SIZE(efx->rss_context.rx_indir_table);
}

static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx->type->rx_hash_key_size;
}

static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
				u8 *hfunc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx->type->rx_pull_rss_config(efx);
	if (rc)
		return rc;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (indir)
		memcpy(indir, efx->rss_context.rx_indir_table,
		       sizeof(efx->rss_context.rx_indir_table));
	if (key)
		memcpy(key, efx->rss_context.rx_hash_key,
		       efx->type->rx_hash_key_size);
	return 0;
}
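
/* A NULL indirection table or hash key from ethtool means "leave that part
 * unchanged", so the current value is substituted before pushing the RSS
 * configuration to the NIC.  Only the Toeplitz hash function is supported.
 */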
static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
				const u8 *key, const u8 hfunc)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Hash function is Toeplitz, cannot be changed */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;
	if (!indir && !key)
		return 0;

	if (!key)
		key = efx->rss_context.rx_hash_key;
	if (!indir)
		indir = efx->rss_context.rx_indir_table;

	return efx->type->rx_push_rss_config(efx, true, indir, key);
}

static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
					u8 *key, u8 *hfunc, u32 rss_context)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_rss_context *ctx;
	int rc = 0;

	if (!efx->type->rx_pull_rss_context_config)
		return -EOPNOTSUPP;

	mutex_lock(&efx->rss_lock);
	ctx = efx_find_rss_context_entry(efx, rss_context);
	if (!ctx) {
		rc = -ENOENT;
		goto out_unlock;
	}
	rc = efx->type->rx_pull_rss_context_config(efx, ctx);
	if (rc)
		goto out_unlock;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (indir)
		memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
	if (key)
		memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}
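
/* Allocate, modify or delete a custom RSS context.  *rss_context ==
 * ETH_RXFH_CONTEXT_ALLOC requests a new context (initialised to the default
 * indirection table and a random key); otherwise the existing context is
 * looked up.  With delete set, the context is removed instead of updated.
 */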
static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
					const u32 *indir, const u8 *key,
					const u8 hfunc, u32 *rss_context,
					bool delete)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_rss_context *ctx;
	bool allocated = false;
	int rc;

	if (!efx->type->rx_push_rss_context_config)
		return -EOPNOTSUPP;
	/* Hash function is Toeplitz, cannot be changed */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	mutex_lock(&efx->rss_lock);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		if (delete) {
			/* alloc + delete == Nothing to do */
			rc = -EINVAL;
			goto out_unlock;
		}
		ctx = efx_alloc_rss_context_entry(efx);
		if (!ctx) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
		/* Initialise indir table and key to defaults */
		efx_set_default_rx_indir_table(efx, ctx);
		netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
		allocated = true;
	} else {
		ctx = efx_find_rss_context_entry(efx, *rss_context);
		if (!ctx) {
			rc = -ENOENT;
			goto out_unlock;
		}
	}

	if (delete) {
		/* delete this context */
		rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
		if (!rc)
			efx_free_rss_context_entry(ctx);
		goto out_unlock;
	}

	if (!key)
		key = ctx->rx_hash_key;
	if (!indir)
		indir = ctx->rx_indir_table;

	rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
	if (rc && allocated)
		efx_free_rss_context_entry(ctx);
	else
		*rss_context = ctx->user_id;
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}

static int efx_ethtool_get_ts_info(struct net_device *net_dev,
				   struct ethtool_ts_info *ts_info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Software capabilities */
	ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
				    SOF_TIMESTAMPING_SOFTWARE);
	ts_info->phc_index = -1;

	efx_ptp_get_ts_info(efx, ts_info);
	return 0;
}

static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int efx_ethtool_get_module_info(struct net_device *net_dev,
				       struct ethtool_modinfo *modinfo)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_info)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_info(efx, modinfo);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int efx_ethtool_get_fecparam(struct net_device *net_dev,
				    struct ethtool_fecparam *fecparam)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	if (!efx->phy_op || !efx->phy_op->get_fecparam)
		return -EOPNOTSUPP;
	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->get_fecparam(efx, fecparam);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static int efx_ethtool_set_fecparam(struct net_device *net_dev,
				    struct ethtool_fecparam *fecparam)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* Check the op we are about to call, not get_fecparam */
	if (!efx->phy_op || !efx->phy_op->set_fecparam)
		return -EOPNOTSUPP;
	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_fecparam(efx, fecparam);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

const struct ethtool_ops efx_ethtool_ops = {
	.get_drvinfo		= efx_ethtool_get_drvinfo,
	.get_regs_len		= efx_ethtool_get_regs_len,
	.get_regs		= efx_ethtool_get_regs,
	.get_msglevel		= efx_ethtool_get_msglevel,
	.set_msglevel		= efx_ethtool_set_msglevel,
	.nway_reset		= efx_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= efx_ethtool_get_coalesce,
	.set_coalesce		= efx_ethtool_set_coalesce,
	.get_ringparam		= efx_ethtool_get_ringparam,
	.set_ringparam		= efx_ethtool_set_ringparam,
	.get_pauseparam		= efx_ethtool_get_pauseparam,
	.set_pauseparam		= efx_ethtool_set_pauseparam,
	.get_sset_count		= efx_ethtool_get_sset_count,
	.self_test		= efx_ethtool_self_test,
	.get_strings		= efx_ethtool_get_strings,
	.set_phys_id		= efx_ethtool_phys_id,
	.get_ethtool_stats	= efx_ethtool_get_stats,
	.get_wol		= efx_ethtool_get_wol,
	.set_wol		= efx_ethtool_set_wol,
	.reset			= efx_ethtool_reset,
	.get_rxnfc		= efx_ethtool_get_rxnfc,
	.set_rxnfc		= efx_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
	.get_rxfh_key_size	= efx_ethtool_get_rxfh_key_size,
	.get_rxfh		= efx_ethtool_get_rxfh,
	.set_rxfh		= efx_ethtool_set_rxfh,
	.get_rxfh_context	= efx_ethtool_get_rxfh_context,
	.set_rxfh_context	= efx_ethtool_set_rxfh_context,
	.get_ts_info		= efx_ethtool_get_ts_info,
	.get_module_info	= efx_ethtool_get_module_info,
	.get_module_eeprom	= efx_ethtool_get_module_eeprom,
	.get_link_ksettings	= efx_ethtool_get_link_ksettings,
	.set_link_ksettings	= efx_ethtool_set_link_ksettings,
	.get_fecparam		= efx_ethtool_get_fecparam,
	.set_fecparam		= efx_ethtool_set_fecparam,
};