/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };
struct _nfp_net_et_stats {
        char name[ETH_GSTRING_LEN];
        int type;
        int sz;
        int off;
};

#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS,                   \
                FIELD_SIZEOF(struct net_device_stats, m),       \
                offsetof(struct net_device_stats, m)
/* For stats in the control BAR (other than Q stats) */
#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS,                 \
                sizeof(u64),                                    \
                (m)
static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
        /* netdev stats */
        {"rx_packets", NN_ET_NETDEV_STAT(rx_packets)},
        {"tx_packets", NN_ET_NETDEV_STAT(tx_packets)},
        {"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)},
        {"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)},
        {"rx_errors", NN_ET_NETDEV_STAT(rx_errors)},
        {"tx_errors", NN_ET_NETDEV_STAT(tx_errors)},
        {"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)},
        {"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)},
        {"multicast", NN_ET_NETDEV_STAT(multicast)},
        {"collisions", NN_ET_NETDEV_STAT(collisions)},
        {"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)},
        {"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)},
        {"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)},
        {"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)},
        {"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)},
        {"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)},
        {"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)},
        {"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)},
        /* Stats from the device */
        {"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)},
        {"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)},
        {"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)},
        {"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)},
        {"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)},
        {"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)},
        {"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)},
        {"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)},
        {"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)},
        {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)},
        {"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)},
        {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)},
        {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)},
        {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)},
        {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)},
        {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
        {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
        {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},

        {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
        {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
        /* see comments in outro functions in nfp_bpf_jit.c to find out
         * how different BPF modes use app-specific counters
         */
        {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
        {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
        {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
        {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
        {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
        {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
                         NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
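
/* Resulting layout of the ethtool stats buffer, as emitted by
 * nfp_net_get_strings()/nfp_net_get_stats() below:
 *
 *   NN_ET_GLOBAL_STATS_LEN entries:   netdev and control BAR counters
 *   3 entries per ring vector:        rvec_N_{rx_pkts,tx_pkts,tx_busy}
 *   NN_ET_RVEC_GATHER_STATS entries:  csum/gather/LSO counters summed
 *                                     over all ring vectors
 *   2 entries per TX and RX queue:    packet and byte counts from the BAR
 */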
static void nfp_net_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *drvinfo)
{
        struct nfp_net *nn = netdev_priv(netdev);

        strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, nfp_net_driver_version,
                sizeof(drvinfo->version));

        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%d.%d",
                 nn->fw_ver.resv, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor);
        strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
                sizeof(drvinfo->bus_info));

        drvinfo->n_stats = NN_ET_STATS_LEN;
        drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
}
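
/* For reference: the fields above are what "ethtool -i <iface>" prints,
 * with the firmware version formatted as resv.class.major.minor.
 */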
static void nfp_net_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct nfp_net *nn = netdev_priv(netdev);

        ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
        ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
        ring->rx_pending = nn->rxd_cnt;
        ring->tx_pending = nn->txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
        struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
        struct nfp_net_ring_set rx = {
                .n_rings = nn->num_rx_rings,
                .mtu = nn->netdev->mtu,
                .dcnt = rxd_cnt,
        };
        struct nfp_net_ring_set tx = {
                .n_rings = nn->num_tx_rings,
                .dcnt = txd_cnt,
        };

        if (nn->rxd_cnt != rxd_cnt)
                reconfig_rx = &rx;
        if (nn->txd_cnt != txd_cnt)
                reconfig_tx = &tx;

        return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
                                     reconfig_rx, reconfig_tx);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct nfp_net *nn = netdev_priv(netdev);
        u32 rxd_cnt, txd_cnt;

        /* We don't have separate queues/rings for small/large frames. */
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        /* Round up to supported values */
        rxd_cnt = roundup_pow_of_two(ring->rx_pending);
        txd_cnt = roundup_pow_of_two(ring->tx_pending);

        if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
            txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
                return -EINVAL;

        if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
                return 0;

        nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
               nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);

        return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
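
/* Example (interface name hypothetical): "ethtool -G eth0 rx 1000 tx 1000"
 * lands here with {rx,tx}_pending == 1000, which roundup_pow_of_two()
 * turns into 1024-descriptor rings before the min/max checks run.
 */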
static void nfp_net_get_strings(struct net_device *netdev,
                                u32 stringset, u8 *data)
{
        struct nfp_net *nn = netdev_priv(netdev);
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
                        memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < nn->num_r_vecs; i++) {
                        sprintf(p, "rvec_%u_rx_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rvec_%u_tx_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rvec_%u_tx_busy", i);
                        p += ETH_GSTRING_LEN;
                }
                strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "tx_gather", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                strncpy(p, "tx_lso", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
                for (i = 0; i < nn->num_tx_rings; i++) {
                        sprintf(p, "txq_%u_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "txq_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < nn->num_rx_rings; i++) {
                        sprintf(p, "rxq_%u_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rxq_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                break;
        }
}
static void nfp_net_get_stats(struct net_device *netdev,
                              struct ethtool_stats *stats, u64 *data)
{
        u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
        struct nfp_net *nn = netdev_priv(netdev);
        struct rtnl_link_stats64 *netdev_stats;
        struct rtnl_link_stats64 temp = {};
        u64 tmp[NN_ET_RVEC_GATHER_STATS];
        u8 __iomem *io_p;
        int i, j, k;
        u8 *p;

        netdev_stats = dev_get_stats(netdev, &temp);

        for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
                switch (nfp_net_et_stats[i].type) {
                case NETDEV_ET_STATS:
                        p = (char *)netdev_stats + nfp_net_et_stats[i].off;
                        data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
                                *(u64 *)p : *(u32 *)p;
                        break;

                case NFP_NET_DEV_ET_STATS:
                        io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
                        data[i] = readq(io_p);
                        break;
                }
        }
        for (j = 0; j < nn->num_r_vecs; j++) {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
                        data[i++] = nn->r_vecs[j].rx_pkts;
                        tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
                        tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
                        tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
                } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
                        data[i++] = nn->r_vecs[j].tx_pkts;
                        data[i++] = nn->r_vecs[j].tx_busy;
                        tmp[3] = nn->r_vecs[j].hw_csum_tx;
                        tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
                        tmp[5] = nn->r_vecs[j].tx_gather;
                        tmp[6] = nn->r_vecs[j].tx_lso;
                } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

                for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
                        gathered_stats[k] += tmp[k];
        }
        for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
                data[i++] = gathered_stats[j];
        for (j = 0; j < nn->num_tx_rings; j++) {
                io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
                data[i++] = readq(io_p);
                io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
                data[i++] = readq(io_p);
        }
        for (j = 0; j < nn->num_rx_rings; j++) {
                io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
                data[i++] = readq(io_p);
                io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
                data[i++] = readq(io_p);
        }
}
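
/* "ethtool -S <iface>" prints all of the counters gathered above, in the
 * same order as the strings emitted by nfp_net_get_strings().
 */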
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return NN_ET_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}
/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
        static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
                [TCP_V4_FLOW]   = NFP_NET_CFG_RSS_IPV4_TCP,
                [TCP_V6_FLOW]   = NFP_NET_CFG_RSS_IPV6_TCP,
                [UDP_V4_FLOW]   = NFP_NET_CFG_RSS_IPV4_UDP,
                [UDP_V6_FLOW]   = NFP_NET_CFG_RSS_IPV6_UDP,
                [IPV4_FLOW]     = NFP_NET_CFG_RSS_IPV4,
                [IPV6_FLOW]     = NFP_NET_CFG_RSS_IPV6,
        };

        if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
                return 0;

        return xlate_ethtool_to_nfp[flow_type];
}
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
                                     struct ethtool_rxnfc *cmd)
{
        u32 nfp_rss_flag;

        cmd->data = 0;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
                return -EOPNOTSUPP;

        nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
        if (!nfp_rss_flag)
                return -EINVAL;

        cmd->data |= RXH_IP_SRC | RXH_IP_DST;
        if (nn->rss_cfg & nfp_rss_flag)
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

        return 0;
}
static int nfp_net_get_rxnfc(struct net_device *netdev,
                             struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = nn->num_rx_rings;
                return 0;
        case ETHTOOL_GRXFH:
                return nfp_net_get_rss_hash_opts(nn, cmd);
        default:
                return -EOPNOTSUPP;
        }
}
static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
                                    struct ethtool_rxnfc *nfc)
{
        u32 new_rss_cfg = nn->rss_cfg;
        u32 nfp_rss_flag;
        int err;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
                return -EOPNOTSUPP;

        /* RSS only supports IP SA/DA and L4 src/dst ports */
        if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;

        /* We need at least the IP SA/DA fields for hashing */
        if (!(nfc->data & RXH_IP_SRC) ||
            !(nfc->data & RXH_IP_DST))
                return -EINVAL;

        nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
        if (!nfp_rss_flag)
                return -EINVAL;

        switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
        case 0:
                new_rss_cfg &= ~nfp_rss_flag;
                break;
        case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                new_rss_cfg |= nfp_rss_flag;
                break;
        default:
                return -EINVAL;
        }

        new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
        new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

        if (new_rss_cfg == nn->rss_cfg)
                return 0;

        writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
        if (err)
                return err;

        nn->rss_cfg = new_rss_cfg;

        nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
        return 0;
}
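
/* Example (interface name hypothetical): "ethtool -N eth0 rx-flow-hash
 * tcp4 sdfn" maps to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
 * and sets the NFP_NET_CFG_RSS_IPV4_TCP flag above, while "... tcp4 sd"
 * clears it again.
 */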
static int nfp_net_set_rxnfc(struct net_device *netdev,
                             struct ethtool_rxnfc *cmd)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
                return nfp_net_set_rss_hash_opt(nn, cmd);
        default:
                return -EOPNOTSUPP;
        }
}
static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
                return 0;

        return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
        return NFP_NET_CFG_RSS_KEY_SZ;
}
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                            u8 *hfunc)
{
        struct nfp_net *nn = netdev_priv(netdev);
        int i;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
                return -EOPNOTSUPP;

        if (indir)
                for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                        indir[i] = nn->rss_itbl[i];
        if (key)
                memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

        return 0;
}
static int nfp_net_set_rxfh(struct net_device *netdev,
                            const u32 *indir, const u8 *key,
                            const u8 hfunc)
{
        struct nfp_net *nn = netdev_priv(netdev);
        int i;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
            !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
                return -EOPNOTSUPP;

        if (!key && !indir)
                return 0;

        if (key) {
                memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
                nfp_net_rss_write_key(nn);
        }
        if (indir) {
                for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                        nn->rss_itbl[i] = indir[i];

                nfp_net_rss_write_itbl(nn);
        }

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}
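
/* Examples (interface name hypothetical): "ethtool -x eth0" reads the
 * table and key back via nfp_net_get_rxfh(); "ethtool -X eth0 equal 4"
 * rewrites the indirection table to spread flows over the first four rings.
 */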
/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
        return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
                             struct ethtool_regs *regs, void *p)
{
        struct nfp_net *nn = netdev_priv(netdev);
        u32 *regs_buf = p;
        int i;

        regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

        for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
                regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
}
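
/* "ethtool -d <iface>" fetches this dump: the entire control BAR read out
 * as NFP_NET_CFG_BAR_SZ / 4 consecutive 32-bit words.
 */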
static int nfp_net_get_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
                return -EINVAL;

        ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
        ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
        ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
        ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

        return 0;
}
static int nfp_net_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int factor;

        if (ec->rx_coalesce_usecs_irq ||
            ec->rx_max_coalesced_frames_irq ||
            ec->tx_coalesce_usecs_irq ||
            ec->tx_max_coalesced_frames_irq ||
            ec->stats_block_coalesce_usecs ||
            ec->use_adaptive_rx_coalesce ||
            ec->use_adaptive_tx_coalesce ||
            ec->pkt_rate_low ||
            ec->rx_coalesce_usecs_low ||
            ec->rx_max_coalesced_frames_low ||
            ec->tx_coalesce_usecs_low ||
            ec->tx_max_coalesced_frames_low ||
            ec->pkt_rate_high ||
            ec->rx_coalesce_usecs_high ||
            ec->rx_max_coalesced_frames_high ||
            ec->tx_coalesce_usecs_high ||
            ec->tx_max_coalesced_frames_high ||
            ec->rate_sample_interval)
                return -ENOTSUPP;

        /* Compute factor used to convert coalesce '_usecs' parameters to
         * ME timestamp ticks.  There are 16 ME clock cycles for each
         * timestamp count.
         */
        factor = nn->me_freq_mhz / 16;
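
        /* Worked example (the 1200 MHz ME clock is illustrative, not read
         * from this code): factor = 1200 / 16 = 75 ticks per usec, so the
         * 16-bit tick limits below cap the *_coalesce_usecs parameters at
         * 65535 / 75 ~= 873 usecs.
         */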
        /* Each pair of (usecs, max_frames) fields specifies that interrupts
         * should be coalesced until
         *      (usecs > 0 && time_since_first_completion >= usecs) ||
         *      (max_frames > 0 && completed_frames >= max_frames)
         *
         * It is illegal to set both usecs and max_frames to zero as this
         * would cause interrupts to never be generated.  To disable
         * coalescing, set usecs = 0 and max_frames = 1.
         *
         * Some implementations ignore the value of max_frames and use the
         * condition time_since_first_completion >= usecs
         */
        if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
                return -EINVAL;

        /* ensure valid configuration */
        if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
                return -EINVAL;
        if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
                return -EINVAL;
        if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
                return -EINVAL;
        if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
                return -EINVAL;
        if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
                return -EINVAL;
        if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
                return -EINVAL;

        /* configuration is valid */
        nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
        nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
        nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
        nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

        /* write configuration to device */
        nfp_net_coalesce_write_cfg(nn);
        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}
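
/* Example (interface name hypothetical): "ethtool -C eth0 rx-usecs 50
 * rx-frames 64" asks for an RX interrupt once 50 usecs have passed since
 * the first completion or 64 frames have completed, whichever comes first.
 */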
static void nfp_net_get_channels(struct net_device *netdev,
                                 struct ethtool_channels *channel)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int num_tx_rings;

        /* XDP TX rings shadow the RX rings one-to-one; hide them from
         * the counts reported to the stack.
         */
        num_tx_rings = nn->num_tx_rings;
        if (nn->xdp_prog)
                num_tx_rings -= nn->num_rx_rings;

        channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
        channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
        channel->max_combined = min(channel->max_rx, channel->max_tx);
        channel->max_other = NFP_NET_NON_Q_VECTORS;
        channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
        channel->rx_count = nn->num_rx_rings - channel->combined_count;
        channel->tx_count = num_tx_rings - channel->combined_count;
        channel->other_count = NFP_NET_NON_Q_VECTORS;
}
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
                                 unsigned int total_tx)
{
        struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
        struct nfp_net_ring_set rx = {
                .n_rings = total_rx,
                .mtu = nn->netdev->mtu,
                .dcnt = nn->rxd_cnt,
        };
        struct nfp_net_ring_set tx = {
                .n_rings = total_tx,
                .dcnt = nn->txd_cnt,
        };

        if (nn->num_rx_rings != total_rx)
                reconfig_rx = &rx;
        if (nn->num_stack_tx_rings != total_tx ||
            (nn->xdp_prog && reconfig_rx))
                reconfig_tx = &tx;

        /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
        if (nn->xdp_prog)
                tx.n_rings += total_rx;

        return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
                                     reconfig_rx, reconfig_tx);
}
static int nfp_net_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channel)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int total_rx, total_tx;

        /* Reject unsupported */
        if (!channel->combined_count ||
            channel->other_count != NFP_NET_NON_Q_VECTORS ||
            (channel->rx_count && channel->tx_count))
                return -EINVAL;

        total_rx = channel->combined_count + channel->rx_count;
        total_tx = channel->combined_count + channel->tx_count;

        if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
            total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
                return -EINVAL;

        return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
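
/* Example (interface name hypothetical): "ethtool -L eth0 combined 8"
 * requests eight combined ring pairs; per the checks above, rx_count and
 * tx_count may grow one side beyond the combined count, but not both.
 */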
static const struct ethtool_ops nfp_net_ethtool_ops = {
        .get_drvinfo            = nfp_net_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = nfp_net_get_ringparam,
        .set_ringparam          = nfp_net_set_ringparam,
        .get_strings            = nfp_net_get_strings,
        .get_ethtool_stats      = nfp_net_get_stats,
        .get_sset_count         = nfp_net_get_sset_count,
        .get_rxnfc              = nfp_net_get_rxnfc,
        .set_rxnfc              = nfp_net_set_rxnfc,
        .get_rxfh_indir_size    = nfp_net_get_rxfh_indir_size,
        .get_rxfh_key_size      = nfp_net_get_rxfh_key_size,
        .get_rxfh               = nfp_net_get_rxfh,
        .set_rxfh               = nfp_net_set_rxfh,
        .get_regs_len           = nfp_net_get_regs_len,
        .get_regs               = nfp_net_get_regs,
        .get_coalesce           = nfp_net_get_coalesce,
        .set_coalesce           = nfp_net_set_coalesce,
        .get_channels           = nfp_net_get_channels,
        .set_channels           = nfp_net_set_channels,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &nfp_net_ethtool_ops;
}