ice_ethtool.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

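/* For illustration (not part of the driver logic), the expansion of
 * ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes) is roughly:
 *
 *	{
 *		.stat_string = "tx_bytes",
 *		.sizeof_stat = FIELD_SIZEOF(struct ice_vsi,
 *					    eth_stats.tx_bytes),
 *		.stat_offset = offsetof(struct ice_vsi, eth_stats.tx_bytes),
 *	}
 *
 * Each table entry thus records a stat's name, width, and byte offset, which
 * lets ice_get_ethtool_stats() read any stat generically from a base pointer.
 */
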
static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
				 ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */

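/* Illustrative (not verbatim) "ethtool -S" output on the PF netdev: VSI
 * counters appear bare, while the PF-wide counters from the table below are
 * prefixed with "port." by ice_get_strings(), e.g.:
 *
 *	tx_bytes: 123456
 *	port.tx_bytes: 7890123
 */
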
static struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
	ICE_PF_STAT("tx_size_64", stats.tx_size_64),
	ICE_PF_STAT("rx_size_64", stats.rx_size_64),
	ICE_PF_STAT("tx_size_127", stats.tx_size_127),
	ICE_PF_STAT("rx_size_127", stats.rx_size_127),
	ICE_PF_STAT("tx_size_255", stats.tx_size_255),
	ICE_PF_STAT("rx_size_255", stats.rx_size_255),
	ICE_PF_STAT("tx_size_511", stats.tx_size_511),
	ICE_PF_STAT("rx_size_511", stats.rx_size_511),
	ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_big", stats.tx_size_big),
	ICE_PF_STAT("rx_size_big", stats.rx_size_big),
	ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
	ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
	ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
	ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};

static u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
};

/**
 * ice_nvm_version_str - format the NVM version string
 * @hw: ptr to the hardware info
 */
static char *ice_nvm_version_str(struct ice_hw *hw)
{
	static char buf[ICE_ETHTOOL_FWVER_LEN];
	u8 ver, patch;
	u32 full_ver;
	u16 build;

	full_ver = hw->nvm.oem_ver;
	ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
		      ICE_OEM_VER_BUILD_SHIFT);
	patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
		 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
		 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
		 hw->nvm.eetrack, ver, build, patch);

	return buf;
}

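/* Example with made-up values: NVM version 0x0102, eetrack 0x80000aa, and OEM
 * version 1.5.3 would be rendered by the format above as
 * "1.02 0x80000aa 1.5.3".
 */
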
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return sizeof(ice_regs_dump_list);
}

static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	int i;

	regs->version = 1;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}

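/* Userspace typically reaches ice_get_regs() with "ethtool -d <dev> raw on",
 * which returns the registers named in ice_regs_dump_list[] in order, one
 * 32-bit word each.
 */
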
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

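/* These two callbacks back "ethtool <dev>" (which reports the current message
 * level) and "ethtool -s <dev> msglvl <N>". Note that without
 * CONFIG_DYNAMIC_DEBUG, a value containing ICE_DBG_USER is routed to the
 * hardware debug_mask rather than to pf->msg_enable.
 */
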
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx-queue-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx-queue-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		break;
	}
}

static int ice_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		/* The number (and order) of strings reported *must* remain
		 * constant for a given netdevice. This function must not
		 * report a different number based on run time parameters
		 * (such as the number of queues in use, or the setting of
		 * a private ethtool flag). This is due to the nature of the
		 * ethtool stats API.
		 *
		 * User space programs such as ethtool must make 3 separate
		 * ioctl requests, one for size, one for the strings, and
		 * finally one for the stats. Since these cross into
		 * user space, changes to the number or size could result in
		 * undefined memory access or incorrect string<->value
		 * correlations for statistics.
		 *
		 * Even if it appears to be safe, changes to the size or
		 * order of strings will suffer from race conditions and are
		 * not safe.
		 */
		return ICE_ALL_STATS_LEN(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

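/* The three-request sequence described above corresponds roughly to these
 * ethtool ioctl commands (from linux/ethtool.h), issued in order by
 * userspace:
 *
 *	ETHTOOL_GSSET_INFO	-> string count for ETH_SS_STATS
 *	ETHTOOL_GSTRINGS	-> ETH_GSTRING_LEN-byte names, count entries
 *	ETHTOOL_GSTATS		-> one u64 per name, same order
 *
 * This is why the count returned here must agree exactly with
 * ice_get_strings() and ice_get_ethtool_stats(), and must not vary at run
 * time.
 */
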
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	unsigned int j = 0;
	int i = 0;
	char *p;

	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate per queue stats */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	rcu_read_unlock();

	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

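/* The fill order above (VSI stats, Tx queues, Rx queues, then "port." PF
 * stats for the PF VSI only) must mirror ice_get_strings(): ethtool
 * correlates names with values purely by index.
 */
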
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	bool link_up;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full);

	/* set speed and duplex */
	if (link_up) {
		switch (hw_link_info->link_speed) {
		case ICE_AQ_LINK_SPEED_100MB:
			ks->base.speed = SPEED_100;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			ks->base.speed = SPEED_2500;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			ks->base.speed = SPEED_5000;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			ks->base.speed = SPEED_10000;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			ks->base.speed = SPEED_25000;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			ks->base.speed = SPEED_40000;
			break;
		default:
			ks->base.speed = SPEED_UNKNOWN;
			break;
		}
		ks->base.duplex = DUPLEX_FULL;
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}

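/* A plain "ethtool <dev>" displays what is assembled above: link modes,
 * speed/duplex when link is up, the port type derived from the media type,
 * and the pause modes implied by fc.req_mode.
 */
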
/**
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = ICE_MIN_NUM_DESC;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
		return -EINVAL;
	}

	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. If the VSI is up, bring it down and then back up.
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}

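/* Typical usage is "ethtool -G <dev> tx <N> rx <N>". The requested counts
 * are rounded up to the next ICE_REQ_DESC_MULTIPLE by the ALIGN() calls
 * above, so the value applied may be slightly larger than the one requested.
 */
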
static int ice_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
	bool link_up;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
		return -EIO;
	}

	return 0;
}

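/* This is the handler behind "ethtool -r <dev>": it asks firmware to restart
 * autonegotiation, re-enabling the link only if it was up when the request
 * was made (the link_up argument above).
 */
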
/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi;

	pi = np->vsi->port_info;
	pause->autoneg =
		((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;
	u8 aq_failures;
	bool link_up;
	int err = 0;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link but autoneg did not complete, warn that the new
	 * settings might not take effect
	 */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS the link is going down; the link will come back up
	 * asynchronously once firmware reports it is ready
	 */
	ice_print_link_msg(vsi, false);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back */
		msleep(75);
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);
	}

	return err;
}

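/* Flow control is normally adjusted with "ethtool -A <dev> rx <on|off>
 * tx <on|off>". Autoneg itself cannot be toggled through this path; as the
 * check above points out, that requires "ethtool -s <dev> autoneg <on|off>".
 */
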
/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0, i;
	u8 *lut;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < vsi->rss_table_size; i++)
		indir[i] = (u32)(lut[i]);

out:
	devm_kfree(&pf->pdev->dev, lut);
	return ret;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 *seed = NULL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (key) {
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(&pf->pdev->dev,
					     ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
		seed = vsi->rss_hkey_user;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
						 vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32-bit word of 'indir' is stored as an 8-bit LUT entry */
	if (indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(indir[i]);
	} else {
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
		return -EIO;

	return 0;
}

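/* The RSS callbacks above back "ethtool -x <dev>" (show the hash key and
 * indirection table) and "ethtool -X <dev> [hkey <key>] [equal <N>]". When
 * no table is supplied, the LUT is refilled to spread flows evenly across
 * rss_size queues via ice_fill_rss_lut().
 */
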
static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings	= ice_get_link_ksettings,
	.get_drvinfo		= ice_get_drvinfo,
	.get_regs_len		= ice_get_regs_len,
	.get_regs		= ice_get_regs,
	.get_msglevel		= ice_get_msglevel,
	.set_msglevel		= ice_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ice_get_strings,
	.get_ethtool_stats	= ice_get_ethtool_stats,
	.get_sset_count		= ice_get_sset_count,
	.get_rxnfc		= ice_get_rxnfc,
	.get_ringparam		= ice_get_ringparam,
	.set_ringparam		= ice_set_ringparam,
	.nway_reset		= ice_nway_reset,
	.get_pauseparam		= ice_get_pauseparam,
	.set_pauseparam		= ice_set_pauseparam,
	.get_rxfh_key_size	= ice_get_rxfh_key_size,
	.get_rxfh_indir_size	= ice_get_rxfh_indir_size,
	.get_rxfh		= ice_get_rxfh,
	.set_rxfh		= ice_set_rxfh,
};

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}