/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

#include "nfpcore/nfp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"

/* Dump flags understood by the ethtool set_dump/get_dump interface. */
enum nfp_dump_diag {
	NFP_DUMP_NSP_DIAG = 0,
};

/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };

/* Describes one ethtool statistic: its label and where to read it from. */
struct _nfp_net_et_stats {
	char name[ETH_GSTRING_LEN];	/* label reported by "ethtool -S" */
	int type;			/* source area: one of the enum above */
	int sz;				/* field width in bytes (4 or 8) */
	int off;			/* byte offset within the source area */
};
/* Initializer tail for a stat sourced from struct net_device_stats;
 * expands to the type, size and offset of member @m.
 */
#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS,			\
		FIELD_SIZEOF(struct net_device_stats, m),	\
		offsetof(struct net_device_stats, m)
/* For stats in the control BAR (other than Q stats) */
#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS,			\
		sizeof(u64),					\
		(m)
/* Table of all fixed-position ethtool stats; per-vector and per-queue
 * stats are generated at runtime in nfp_net_get_strings()/get_stats().
 */
static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
	/* netdev stats */
	{"rx_packets", NN_ET_NETDEV_STAT(rx_packets)},
	{"tx_packets", NN_ET_NETDEV_STAT(tx_packets)},
	{"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)},
	{"rx_errors", NN_ET_NETDEV_STAT(rx_errors)},
	{"tx_errors", NN_ET_NETDEV_STAT(tx_errors)},
	{"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)},
	{"multicast", NN_ET_NETDEV_STAT(multicast)},
	{"collisions", NN_ET_NETDEV_STAT(collisions)},
	{"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)},
	{"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)},
	/* Stats from the device */
	{"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)},
	{"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)},
	{"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)},
	{"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)},
	{"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)},
	{"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)},
	{"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)},
	{"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)},
	{"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)},
	{"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)},
	{"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)},
	{"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)},
	{"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)},
	{"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)},
	{"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)},
	{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
	{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
	{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},

	{"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
	{"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
	{"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
	{"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
	{"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
	{"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
	{"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
/* Three stats per ring vector: rx_pkts, tx_pkts, tx_busy */
#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
/* Number of counters summed across all ring vectors (csum/gather/lso) */
#define NN_ET_RVEC_GATHER_STATS 7
/* Two stats (pkts, bytes) per TX ring and per RX ring */
#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
/* Total length reported via get_sset_count(); note the macros below
 * implicitly use a local variable named "nn".
 */
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
			 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
  125. static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version)
  126. {
  127. struct nfp_nsp *nsp;
  128. if (!nn->cpp)
  129. return;
  130. nsp = nfp_nsp_open(nn->cpp);
  131. if (IS_ERR(nsp))
  132. return;
  133. snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu",
  134. nfp_nsp_get_abi_ver_major(nsp),
  135. nfp_nsp_get_abi_ver_minor(nsp));
  136. nfp_nsp_close(nsp);
  137. }
  138. static void nfp_net_get_drvinfo(struct net_device *netdev,
  139. struct ethtool_drvinfo *drvinfo)
  140. {
  141. char nsp_version[ETHTOOL_FWVERS_LEN] = {};
  142. struct nfp_net *nn = netdev_priv(netdev);
  143. strlcpy(drvinfo->driver, nn->pdev->driver->name,
  144. sizeof(drvinfo->driver));
  145. strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
  146. nfp_net_get_nspinfo(nn, nsp_version);
  147. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  148. "%d.%d.%d.%d %s",
  149. nn->fw_ver.resv, nn->fw_ver.class,
  150. nn->fw_ver.major, nn->fw_ver.minor, nsp_version);
  151. strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
  152. sizeof(drvinfo->bus_info));
  153. drvinfo->n_stats = NN_ET_STATS_LEN;
  154. drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
  155. }
  156. static void nfp_net_get_ringparam(struct net_device *netdev,
  157. struct ethtool_ringparam *ring)
  158. {
  159. struct nfp_net *nn = netdev_priv(netdev);
  160. ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
  161. ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
  162. ring->rx_pending = nn->rxd_cnt;
  163. ring->tx_pending = nn->txd_cnt;
  164. }
  165. static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
  166. {
  167. struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
  168. struct nfp_net_ring_set rx = {
  169. .n_rings = nn->num_rx_rings,
  170. .mtu = nn->netdev->mtu,
  171. .dcnt = rxd_cnt,
  172. };
  173. struct nfp_net_ring_set tx = {
  174. .n_rings = nn->num_tx_rings,
  175. .dcnt = txd_cnt,
  176. };
  177. if (nn->rxd_cnt != rxd_cnt)
  178. reconfig_rx = &rx;
  179. if (nn->txd_cnt != txd_cnt)
  180. reconfig_tx = &tx;
  181. return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
  182. reconfig_rx, reconfig_tx);
  183. }
  184. static int nfp_net_set_ringparam(struct net_device *netdev,
  185. struct ethtool_ringparam *ring)
  186. {
  187. struct nfp_net *nn = netdev_priv(netdev);
  188. u32 rxd_cnt, txd_cnt;
  189. /* We don't have separate queues/rings for small/large frames. */
  190. if (ring->rx_mini_pending || ring->rx_jumbo_pending)
  191. return -EINVAL;
  192. /* Round up to supported values */
  193. rxd_cnt = roundup_pow_of_two(ring->rx_pending);
  194. txd_cnt = roundup_pow_of_two(ring->tx_pending);
  195. if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
  196. txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
  197. return -EINVAL;
  198. if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
  199. return 0;
  200. nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
  201. nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
  202. return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
  203. }
  204. static void nfp_net_get_strings(struct net_device *netdev,
  205. u32 stringset, u8 *data)
  206. {
  207. struct nfp_net *nn = netdev_priv(netdev);
  208. u8 *p = data;
  209. int i;
  210. switch (stringset) {
  211. case ETH_SS_STATS:
  212. for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
  213. memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
  214. p += ETH_GSTRING_LEN;
  215. }
  216. for (i = 0; i < nn->num_r_vecs; i++) {
  217. sprintf(p, "rvec_%u_rx_pkts", i);
  218. p += ETH_GSTRING_LEN;
  219. sprintf(p, "rvec_%u_tx_pkts", i);
  220. p += ETH_GSTRING_LEN;
  221. sprintf(p, "rvec_%u_tx_busy", i);
  222. p += ETH_GSTRING_LEN;
  223. }
  224. strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
  225. p += ETH_GSTRING_LEN;
  226. strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
  227. p += ETH_GSTRING_LEN;
  228. strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
  229. p += ETH_GSTRING_LEN;
  230. strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
  231. p += ETH_GSTRING_LEN;
  232. strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
  233. p += ETH_GSTRING_LEN;
  234. strncpy(p, "tx_gather", ETH_GSTRING_LEN);
  235. p += ETH_GSTRING_LEN;
  236. strncpy(p, "tx_lso", ETH_GSTRING_LEN);
  237. p += ETH_GSTRING_LEN;
  238. for (i = 0; i < nn->num_tx_rings; i++) {
  239. sprintf(p, "txq_%u_pkts", i);
  240. p += ETH_GSTRING_LEN;
  241. sprintf(p, "txq_%u_bytes", i);
  242. p += ETH_GSTRING_LEN;
  243. }
  244. for (i = 0; i < nn->num_rx_rings; i++) {
  245. sprintf(p, "rxq_%u_pkts", i);
  246. p += ETH_GSTRING_LEN;
  247. sprintf(p, "rxq_%u_bytes", i);
  248. p += ETH_GSTRING_LEN;
  249. }
  250. break;
  251. }
  252. }
/* Fill @data with all stat values, in the same order as the strings
 * emitted by nfp_net_get_strings().
 */
static void nfp_net_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	struct rtnl_link_stats64 *netdev_stats;
	struct rtnl_link_stats64 temp = {};
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	u8 __iomem *io_p;
	int i, j, k;
	u8 *p;

	netdev_stats = dev_get_stats(netdev, &temp);

	/* Fixed-position stats, read either from the netdev stats struct
	 * or as 64-bit values from the control BAR.
	 */
	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
		switch (nfp_net_et_stats[i].type) {
		case NETDEV_ET_STATS:
			/* Field width depends on the netdev stats layout. */
			p = (char *)netdev_stats + nfp_net_et_stats[i].off;
			data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
				*(u64 *)p : *(u32 *)p;
			break;

		case NFP_NET_DEV_ET_STATS:
			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
			data[i] = readq(io_p);
			break;
		}
	}
	/* Per ring-vector stats.  The csum/gather/lso counters are summed
	 * into gathered_stats[] and emitted once, after the per-vector
	 * entries.  Each u64_stats loop retries until it obtains a
	 * consistent snapshot of the counters.
	 */
	for (j = 0; j < nn->num_r_vecs; j++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
			data[i++] = nn->r_vecs[j].rx_pkts;
			tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
			data[i++] = nn->r_vecs[j].tx_pkts;
			data[i++] = nn->r_vecs[j].tx_busy;
			tmp[3] = nn->r_vecs[j].hw_csum_tx;
			tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[j].tx_gather;
			tmp[6] = nn->r_vecs[j].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

		for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
			gathered_stats[k] += tmp[k];
	}
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		data[i++] = gathered_stats[j];
	/* Per-queue counters from the control BAR: packets at offset 0,
	 * bytes at offset 8 within each queue's stats area.
	 */
	for (j = 0; j < nn->num_tx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
	for (j = 0; j < nn->num_rx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
}
  314. static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
  315. {
  316. struct nfp_net *nn = netdev_priv(netdev);
  317. switch (sset) {
  318. case ETH_SS_STATS:
  319. return NN_ET_STATS_LEN;
  320. default:
  321. return -EOPNOTSUPP;
  322. }
  323. }
  324. /* RX network flow classification (RSS, filters, etc)
  325. */
  326. static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
  327. {
  328. static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
  329. [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP,
  330. [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP,
  331. [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP,
  332. [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP,
  333. [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4,
  334. [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6,
  335. };
  336. if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
  337. return 0;
  338. return xlate_ethtool_to_nfp[flow_type];
  339. }
  340. static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
  341. struct ethtool_rxnfc *cmd)
  342. {
  343. u32 nfp_rss_flag;
  344. cmd->data = 0;
  345. if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
  346. return -EOPNOTSUPP;
  347. nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
  348. if (!nfp_rss_flag)
  349. return -EINVAL;
  350. cmd->data |= RXH_IP_SRC | RXH_IP_DST;
  351. if (nn->rss_cfg & nfp_rss_flag)
  352. cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  353. return 0;
  354. }
  355. static int nfp_net_get_rxnfc(struct net_device *netdev,
  356. struct ethtool_rxnfc *cmd, u32 *rule_locs)
  357. {
  358. struct nfp_net *nn = netdev_priv(netdev);
  359. switch (cmd->cmd) {
  360. case ETHTOOL_GRXRINGS:
  361. cmd->data = nn->num_rx_rings;
  362. return 0;
  363. case ETHTOOL_GRXFH:
  364. return nfp_net_get_rss_hash_opts(nn, cmd);
  365. default:
  366. return -EOPNOTSUPP;
  367. }
  368. }
/* ETHTOOL_SRXFH: enable/disable L4-port hashing for one flow type. */
static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	/* L4 source and destination ports can only be toggled as a pair. */
	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	/* Write the new control word, then kick the device reconfig; the
	 * cached rss_cfg is only updated once the reconfig succeeded.
	 */
	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}
  410. static int nfp_net_set_rxnfc(struct net_device *netdev,
  411. struct ethtool_rxnfc *cmd)
  412. {
  413. struct nfp_net *nn = netdev_priv(netdev);
  414. switch (cmd->cmd) {
  415. case ETHTOOL_SRXFH:
  416. return nfp_net_set_rss_hash_opt(nn, cmd);
  417. default:
  418. return -EOPNOTSUPP;
  419. }
  420. }
  421. static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
  422. {
  423. struct nfp_net *nn = netdev_priv(netdev);
  424. if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
  425. return 0;
  426. return ARRAY_SIZE(nn->rss_itbl);
  427. }
  428. static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
  429. {
  430. return NFP_NET_CFG_RSS_KEY_SZ;
  431. }
  432. static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
  433. u8 *hfunc)
  434. {
  435. struct nfp_net *nn = netdev_priv(netdev);
  436. int i;
  437. if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
  438. return -EOPNOTSUPP;
  439. if (indir)
  440. for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
  441. indir[i] = nn->rss_itbl[i];
  442. if (key)
  443. memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
  444. if (hfunc)
  445. *hfunc = ETH_RSS_HASH_TOP;
  446. return 0;
  447. }
  448. static int nfp_net_set_rxfh(struct net_device *netdev,
  449. const u32 *indir, const u8 *key,
  450. const u8 hfunc)
  451. {
  452. struct nfp_net *nn = netdev_priv(netdev);
  453. int i;
  454. if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
  455. !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
  456. return -EOPNOTSUPP;
  457. if (!key && !indir)
  458. return 0;
  459. if (key) {
  460. memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
  461. nfp_net_rss_write_key(nn);
  462. }
  463. if (indir) {
  464. for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
  465. nn->rss_itbl[i] = indir[i];
  466. nfp_net_rss_write_itbl(nn);
  467. }
  468. return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
  469. }
  470. /* Dump BAR registers
  471. */
  472. static int nfp_net_get_regs_len(struct net_device *netdev)
  473. {
  474. return NFP_NET_CFG_BAR_SZ;
  475. }
  476. static void nfp_net_get_regs(struct net_device *netdev,
  477. struct ethtool_regs *regs, void *p)
  478. {
  479. struct nfp_net *nn = netdev_priv(netdev);
  480. u32 *regs_buf = p;
  481. int i;
  482. regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
  483. for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
  484. regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
  485. }
  486. static int nfp_net_get_coalesce(struct net_device *netdev,
  487. struct ethtool_coalesce *ec)
  488. {
  489. struct nfp_net *nn = netdev_priv(netdev);
  490. if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
  491. return -EINVAL;
  492. ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
  493. ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
  494. ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
  495. ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;
  496. return 0;
  497. }
/* Other debug dumps
 */
/* Query or perform the NSP diagnostic dump.  With @buffer NULL only
 * dump->len is filled in (size query); otherwise @buffer must match the
 * current resource size exactly and receives the dump contents.
 */
static int
nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!nn->cpp)
		return -EOPNOTSUPP;

	dump->version = 1;
	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		/* The resource may have changed size since the size query. */
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		/* Short reads become -EIO; a full read returns 0. */
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}
exit_release:
	nfp_resource_release(res);

	return ret;
}
  532. static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val)
  533. {
  534. struct nfp_net *nn = netdev_priv(netdev);
  535. if (!nn->cpp)
  536. return -EOPNOTSUPP;
  537. if (val->flag != NFP_DUMP_NSP_DIAG)
  538. return -EINVAL;
  539. nn->ethtool_dump_flag = val->flag;
  540. return 0;
  541. }
  542. static int
  543. nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
  544. {
  545. return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL);
  546. }
  547. static int
  548. nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
  549. void *buffer)
  550. {
  551. return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer);
  552. }
  553. static int nfp_net_set_coalesce(struct net_device *netdev,
  554. struct ethtool_coalesce *ec)
  555. {
  556. struct nfp_net *nn = netdev_priv(netdev);
  557. unsigned int factor;
  558. if (ec->rx_coalesce_usecs_irq ||
  559. ec->rx_max_coalesced_frames_irq ||
  560. ec->tx_coalesce_usecs_irq ||
  561. ec->tx_max_coalesced_frames_irq ||
  562. ec->stats_block_coalesce_usecs ||
  563. ec->use_adaptive_rx_coalesce ||
  564. ec->use_adaptive_tx_coalesce ||
  565. ec->pkt_rate_low ||
  566. ec->rx_coalesce_usecs_low ||
  567. ec->rx_max_coalesced_frames_low ||
  568. ec->tx_coalesce_usecs_low ||
  569. ec->tx_max_coalesced_frames_low ||
  570. ec->pkt_rate_high ||
  571. ec->rx_coalesce_usecs_high ||
  572. ec->rx_max_coalesced_frames_high ||
  573. ec->tx_coalesce_usecs_high ||
  574. ec->tx_max_coalesced_frames_high ||
  575. ec->rate_sample_interval)
  576. return -ENOTSUPP;
  577. /* Compute factor used to convert coalesce '_usecs' parameters to
  578. * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
  579. * count.
  580. */
  581. factor = nn->me_freq_mhz / 16;
  582. /* Each pair of (usecs, max_frames) fields specifies that interrupts
  583. * should be coalesced until
  584. * (usecs > 0 && time_since_first_completion >= usecs) ||
  585. * (max_frames > 0 && completed_frames >= max_frames)
  586. *
  587. * It is illegal to set both usecs and max_frames to zero as this would
  588. * cause interrupts to never be generated. To disable coalescing, set
  589. * usecs = 0 and max_frames = 1.
  590. *
  591. * Some implementations ignore the value of max_frames and use the
  592. * condition time_since_first_completion >= usecs
  593. */
  594. if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
  595. return -EINVAL;
  596. /* ensure valid configuration */
  597. if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
  598. return -EINVAL;
  599. if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
  600. return -EINVAL;
  601. if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
  602. return -EINVAL;
  603. if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
  604. return -EINVAL;
  605. if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
  606. return -EINVAL;
  607. if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
  608. return -EINVAL;
  609. /* configuration is valid */
  610. nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
  611. nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
  612. nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
  613. nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;
  614. /* write configuration to device */
  615. nfp_net_coalesce_write_cfg(nn);
  616. return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
  617. }
  618. static void nfp_net_get_channels(struct net_device *netdev,
  619. struct ethtool_channels *channel)
  620. {
  621. struct nfp_net *nn = netdev_priv(netdev);
  622. unsigned int num_tx_rings;
  623. num_tx_rings = nn->num_tx_rings;
  624. if (nn->xdp_prog)
  625. num_tx_rings -= nn->num_rx_rings;
  626. channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
  627. channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
  628. channel->max_combined = min(channel->max_rx, channel->max_tx);
  629. channel->max_other = NFP_NET_NON_Q_VECTORS;
  630. channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
  631. channel->rx_count = nn->num_rx_rings - channel->combined_count;
  632. channel->tx_count = num_tx_rings - channel->combined_count;
  633. channel->other_count = NFP_NET_NON_Q_VECTORS;
  634. }
/* Apply new RX/TX ring counts via the common ring reconfig helper;
 * only the ring sets that actually change are passed to the reconfig.
 */
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
	struct nfp_net_ring_set rx = {
		.n_rings = total_rx,
		.mtu = nn->netdev->mtu,
		.dcnt = nn->rxd_cnt,
	};
	struct nfp_net_ring_set tx = {
		.n_rings = total_tx,
		.dcnt = nn->txd_cnt,
	};

	if (nn->num_rx_rings != total_rx)
		reconfig_rx = &rx;
	/* TX must be reconfigured when the stack ring count changes, or
	 * when XDP is on and RX changes (XDP TX rings track RX rings).
	 */
	if (nn->num_stack_tx_rings != total_tx ||
	    (nn->xdp_prog && reconfig_rx))
		reconfig_tx = &tx;

	/* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
	if (nn->xdp_prog)
		tx.n_rings += total_rx;

	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
				     reconfig_rx, reconfig_tx);
}
  659. static int nfp_net_set_channels(struct net_device *netdev,
  660. struct ethtool_channels *channel)
  661. {
  662. struct nfp_net *nn = netdev_priv(netdev);
  663. unsigned int total_rx, total_tx;
  664. /* Reject unsupported */
  665. if (!channel->combined_count ||
  666. channel->other_count != NFP_NET_NON_Q_VECTORS ||
  667. (channel->rx_count && channel->tx_count))
  668. return -EINVAL;
  669. total_rx = channel->combined_count + channel->rx_count;
  670. total_tx = channel->combined_count + channel->tx_count;
  671. if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
  672. total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
  673. return -EINVAL;
  674. return nfp_net_set_num_rings(nn, total_rx, total_tx);
  675. }
/* ethtool operations exposed by NFP vNIC netdevs. */
static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_net_set_dump,
	.get_dump_flag		= nfp_net_get_dump_flag,
	.get_dump_data		= nfp_net_get_dump_data,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
};
/* Attach the NFP ethtool operations to a newly created netdev. */
void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}