ice_ethtool.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. /* ethtool support for ice */
  4. #include "ice.h"
/* Descriptor for one statistic exported via ethtool -S: the user-visible
 * name, the size of the backing struct member (u32 vs u64 is what matters
 * to the readers), and the member's byte offset within its parent struct.
 */
struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};
/* Build one struct ice_stats initializer from a struct type, an ethtool
 * name, and the member holding the counter.
 */
#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Per-VSI statistic: _stat is a member of struct ice_vsi */
#define ICE_VSI_STAT(_name, _stat) \
		ICE_STAT(struct ice_vsi, _name, _stat)
/* Per-port (PF) statistic: _stat is a member of struct ice_pf */
#define ICE_PF_STAT(_name, _stat) \
		ICE_STAT(struct ice_pf, _name, _stat)
  19. static int ice_q_stats_len(struct net_device *netdev)
  20. {
  21. struct ice_netdev_priv *np = netdev_priv(netdev);
  22. return ((np->vsi->num_txq + np->vsi->num_rxq) *
  23. (sizeof(struct ice_q_stats) / sizeof(u64)));
  24. }
/* Entry counts for the fixed stats tables; the per-queue contribution is
 * runtime-dependent, hence ICE_ALL_STATS_LEN takes the netdev.
 */
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
			      ice_q_stats_len(n))
/* Per-VSI statistics reported for every ice netdev; offsets are into
 * struct ice_vsi (see ICE_VSI_STAT).
 */
static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
  45. /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  46. * but they aren't. This device is capable of supporting multiple
  47. * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
  48. * netdevs whereas the PF_STATs are for the physical function that's
  49. * hosting these netdevs.
  50. *
  51. * The PF_STATs are appended to the netdev stats only when ethtool -S
  52. * is queried on the base PF netdev.
  53. */
  54. static struct ice_stats ice_gstrings_pf_stats[] = {
  55. ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
  56. ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
  57. ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
  58. ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
  59. ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
  60. ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
  61. ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
  62. ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
  63. ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
  64. ICE_PF_STAT("tx_size_64", stats.tx_size_64),
  65. ICE_PF_STAT("rx_size_64", stats.rx_size_64),
  66. ICE_PF_STAT("tx_size_127", stats.tx_size_127),
  67. ICE_PF_STAT("rx_size_127", stats.rx_size_127),
  68. ICE_PF_STAT("tx_size_255", stats.tx_size_255),
  69. ICE_PF_STAT("rx_size_255", stats.rx_size_255),
  70. ICE_PF_STAT("tx_size_511", stats.tx_size_511),
  71. ICE_PF_STAT("rx_size_511", stats.rx_size_511),
  72. ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
  73. ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
  74. ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
  75. ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
  76. ICE_PF_STAT("tx_size_big", stats.tx_size_big),
  77. ICE_PF_STAT("rx_size_big", stats.rx_size_big),
  78. ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
  79. ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
  80. ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
  81. ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
  82. ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
  83. ICE_PF_STAT("rx_undersize", stats.rx_undersize),
  84. ICE_PF_STAT("rx_fragments", stats.rx_fragments),
  85. ICE_PF_STAT("rx_oversize", stats.rx_oversize),
  86. ICE_PF_STAT("rx_jabber", stats.rx_jabber),
  87. ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
  88. ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
  89. ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
  90. ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
  91. ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
  92. ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
  93. ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
  94. };
  95. static u32 ice_regs_dump_list[] = {
  96. PFGEN_STATE,
  97. PRTGEN_STATUS,
  98. QRX_CTRL(0),
  99. QINT_TQCTL(0),
  100. QINT_RQCTL(0),
  101. PFINT_OICR_ENA,
  102. QRX_ITR(0),
  103. };
  104. /**
  105. * ice_nvm_version_str - format the NVM version strings
  106. * @hw: ptr to the hardware info
  107. */
  108. static char *ice_nvm_version_str(struct ice_hw *hw)
  109. {
  110. static char buf[ICE_ETHTOOL_FWVER_LEN];
  111. u8 ver, patch;
  112. u32 full_ver;
  113. u16 build;
  114. full_ver = hw->nvm.oem_ver;
  115. ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
  116. build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
  117. ICE_OEM_VER_BUILD_SHIFT);
  118. patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
  119. snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
  120. (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
  121. (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
  122. hw->nvm.eetrack, ver, build, patch);
  123. return buf;
  124. }
  125. static void
  126. ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
  127. {
  128. struct ice_netdev_priv *np = netdev_priv(netdev);
  129. struct ice_vsi *vsi = np->vsi;
  130. struct ice_pf *pf = vsi->back;
  131. strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
  132. strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
  133. strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
  134. sizeof(drvinfo->fw_version));
  135. strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
  136. sizeof(drvinfo->bus_info));
  137. }
/* Size in bytes of the register dump produced by ice_get_regs() */
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return sizeof(ice_regs_dump_list);
}
  142. static void
  143. ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
  144. {
  145. struct ice_netdev_priv *np = netdev_priv(netdev);
  146. struct ice_pf *pf = np->vsi->back;
  147. struct ice_hw *hw = &pf->hw;
  148. u32 *regs_buf = (u32 *)p;
  149. int i;
  150. regs->version = 1;
  151. for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
  152. regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
  153. }
/**
 * ice_get_msglevel - return the driver message level
 * @netdev: network interface device structure
 *
 * Without dynamic debug support, a nonzero hw debug mask is also printed
 * so the user can see both knobs that ice_set_msglevel() controls.
 */
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}
/**
 * ice_set_msglevel - set the driver message level
 * @netdev: network interface device structure
 * @data: new message level
 *
 * Without dynamic debug, values carrying the ICE_DBG_USER bit are routed
 * to the hardware debug mask instead of msg_enable; note the two are
 * mutually exclusive per call in that configuration.
 */
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
/**
 * ice_get_strings - fill the stat name table for ETH_SS_STATS
 * @netdev: network interface device structure
 * @stringset: string set id; only ETH_SS_STATS is handled
 * @data: output buffer, ETH_GSTRING_LEN bytes per entry
 *
 * The emission order here (VSI stats, Tx queues, Rx queues, then port
 * stats on the PF netdev only) must match ice_get_ethtool_stats() and the
 * count from ice_get_sset_count().
 */
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx-queue-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx-queue-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		/* port stats are appended only on the base PF netdev */
		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		break;
	}
}
  217. static int ice_get_sset_count(struct net_device *netdev, int sset)
  218. {
  219. switch (sset) {
  220. case ETH_SS_STATS:
  221. return ICE_ALL_STATS_LEN(netdev);
  222. default:
  223. return -EOPNOTSUPP;
  224. }
  225. }
  226. static void
  227. ice_get_ethtool_stats(struct net_device *netdev,
  228. struct ethtool_stats __always_unused *stats, u64 *data)
  229. {
  230. struct ice_netdev_priv *np = netdev_priv(netdev);
  231. struct ice_vsi *vsi = np->vsi;
  232. struct ice_pf *pf = vsi->back;
  233. struct ice_ring *ring;
  234. unsigned int j = 0;
  235. int i = 0;
  236. char *p;
  237. for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
  238. p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
  239. data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
  240. sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
  241. }
  242. /* populate per queue stats */
  243. rcu_read_lock();
  244. ice_for_each_txq(vsi, j) {
  245. ring = READ_ONCE(vsi->tx_rings[j]);
  246. if (!ring)
  247. continue;
  248. data[i++] = ring->stats.pkts;
  249. data[i++] = ring->stats.bytes;
  250. }
  251. ice_for_each_rxq(vsi, j) {
  252. ring = READ_ONCE(vsi->rx_rings[j]);
  253. data[i++] = ring->stats.pkts;
  254. data[i++] = ring->stats.bytes;
  255. }
  256. rcu_read_unlock();
  257. if (vsi->type != ICE_VSI_PF)
  258. return;
  259. for (j = 0; j < ICE_PF_STATS_LEN; j++) {
  260. p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
  261. data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
  262. sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
  263. }
  264. }
/**
 * ice_get_link_ksettings - report link speed, duplex, port and pause modes
 * @netdev: network interface device structure
 * @ks: ethtool link settings to fill in (zeroed by the ethtool core)
 *
 * Returns 0 always. Speed/duplex come from the cached link status; the
 * supported/advertised masks are currently hard-coded to 10GBase-T plus
 * whatever the media-type switch below adds.
 */
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	bool link_up;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* NOTE(review): only 10GBase-T is advertised regardless of the
	 * actual PHY capabilities -- presumably a placeholder; verify
	 * against the PHY abilities AQ data.
	 */
	ethtool_link_ksettings_add_link_mode(ks, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising,
					     10000baseT_Full);

	/* set speed and duplex */
	if (link_up) {
		switch (hw_link_info->link_speed) {
		case ICE_AQ_LINK_SPEED_100MB:
			ks->base.speed = SPEED_100;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			ks->base.speed = SPEED_2500;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			ks->base.speed = SPEED_5000;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			ks->base.speed = SPEED_10000;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			ks->base.speed = SPEED_25000;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			ks->base.speed = SPEED_40000;
			break;
		default:
			ks->base.speed = SPEED_UNKNOWN;
			break;
		}
		ks->base.duplex = DUPLEX_FULL;
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	/* translate the requested flow-control mode into Pause/Asym_Pause
	 * advertisement bits
	 */
	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}
/**
 * ice_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported. Only ETHTOOL_GRXRINGS is
 * handled (reports the RSS queue count); everything else is -EOPNOTSUPP.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}
  388. static void
  389. ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
  390. {
  391. struct ice_netdev_priv *np = netdev_priv(netdev);
  392. struct ice_vsi *vsi = np->vsi;
  393. ring->rx_max_pending = ICE_MAX_NUM_DESC;
  394. ring->tx_max_pending = ICE_MAX_NUM_DESC;
  395. ring->rx_pending = vsi->rx_rings[0]->count;
  396. ring->tx_pending = vsi->tx_rings[0]->count;
  397. ring->rx_mini_pending = ICE_MIN_NUM_DESC;
  398. ring->rx_mini_max_pending = 0;
  399. ring->rx_jumbo_max_pending = 0;
  400. ring->rx_jumbo_pending = 0;
  401. }
/**
 * ice_set_ringparam - change Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 *
 * Validates the requested counts, then either stores them for the next
 * ifup (link down) or allocates replacement rings, brings the VSI down,
 * swaps the rings in, and brings it back up. Serialized against other
 * reconfiguration via the __ICE_CFG_BUSY bit.
 *
 * Returns 0 on success, -EINVAL/-EBUSY/-ENOMEM on failure.
 */
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
		return -EINVAL;
	}

	/* counts are silently rounded up to the hardware multiple */
	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	/* take the config lock, with a bounded wait (~50-100 ms total) */
	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	/* NOTE(review): setup iterates num_txq but the swap below iterates
	 * alloc_txq -- assumes they are equal while the netdev is running;
	 * confirm.
	 */
	for (i = 0; i < vsi->num_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			/* unwind the rings set up so far */
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->num_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		/* the label sits inside the loop: on success err == 0 and
		 * the loop simply continues
		 */
		if (err) {
			/* NOTE(review): if ice_alloc_rx_bufs() failed, ring
			 * i itself was successfully set up but is not freed
			 * here (only 0..i-1), and the real err is
			 * overwritten with -ENOMEM -- verify.
			 */
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. if VSI is up, bring it down and then back up
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}
  553. static int ice_nway_reset(struct net_device *netdev)
  554. {
  555. /* restart autonegotiation */
  556. struct ice_netdev_priv *np = netdev_priv(netdev);
  557. struct ice_link_status *hw_link_info;
  558. struct ice_vsi *vsi = np->vsi;
  559. struct ice_port_info *pi;
  560. enum ice_status status;
  561. bool link_up;
  562. pi = vsi->port_info;
  563. hw_link_info = &pi->phy.link_info;
  564. link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
  565. status = ice_aq_set_link_restart_an(pi, link_up, NULL);
  566. if (status) {
  567. netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
  568. status, pi->hw->adminq.sq_last_status);
  569. return -EIO;
  570. }
  571. return 0;
  572. }
  573. /**
  574. * ice_get_pauseparam - Get Flow Control status
  575. * @netdev: network interface device structure
  576. * @pause: ethernet pause (flow control) parameters
  577. */
  578. static void
  579. ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  580. {
  581. struct ice_netdev_priv *np = netdev_priv(netdev);
  582. struct ice_port_info *pi;
  583. pi = np->vsi->port_info;
  584. pause->autoneg =
  585. ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
  586. AUTONEG_ENABLE : AUTONEG_DISABLE);
  587. if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
  588. pause->rx_pause = 1;
  589. } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
  590. pause->tx_pause = 1;
  591. } else if (pi->fc.current_mode == ICE_FC_FULL) {
  592. pause->rx_pause = 1;
  593. pause->tx_pause = 1;
  594. }
  595. }
  596. /**
  597. * ice_set_pauseparam - Set Flow Control parameter
  598. * @netdev: network interface device structure
  599. * @pause: return tx/rx flow control status
  600. */
  601. static int
  602. ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  603. {
  604. struct ice_netdev_priv *np = netdev_priv(netdev);
  605. struct ice_link_status *hw_link_info;
  606. struct ice_pf *pf = np->vsi->back;
  607. struct ice_vsi *vsi = np->vsi;
  608. struct ice_hw *hw = &pf->hw;
  609. struct ice_port_info *pi;
  610. enum ice_status status;
  611. u8 aq_failures;
  612. bool link_up;
  613. int err = 0;
  614. pi = vsi->port_info;
  615. hw_link_info = &pi->phy.link_info;
  616. link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
  617. /* Changing the port's flow control is not supported if this isn't the
  618. * PF VSI
  619. */
  620. if (vsi->type != ICE_VSI_PF) {
  621. netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
  622. return -EOPNOTSUPP;
  623. }
  624. if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
  625. netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
  626. return -EOPNOTSUPP;
  627. }
  628. /* If we have link and don't have autoneg */
  629. if (!test_bit(__ICE_DOWN, pf->state) &&
  630. !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
  631. /* Send message that it might not necessarily work*/
  632. netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
  633. }
  634. if (pause->rx_pause && pause->tx_pause)
  635. pi->fc.req_mode = ICE_FC_FULL;
  636. else if (pause->rx_pause && !pause->tx_pause)
  637. pi->fc.req_mode = ICE_FC_RX_PAUSE;
  638. else if (!pause->rx_pause && pause->tx_pause)
  639. pi->fc.req_mode = ICE_FC_TX_PAUSE;
  640. else if (!pause->rx_pause && !pause->tx_pause)
  641. pi->fc.req_mode = ICE_FC_NONE;
  642. else
  643. return -EINVAL;
  644. /* Tell the OS link is going down, the link will go back up when fw
  645. * says it is ready asynchronously
  646. */
  647. ice_print_link_msg(vsi, false);
  648. netif_carrier_off(netdev);
  649. netif_tx_stop_all_queues(netdev);
  650. /* Set the FC mode and only restart AN if link is up */
  651. status = ice_set_fc(pi, &aq_failures, link_up);
  652. if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
  653. netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
  654. status, hw->adminq.sq_last_status);
  655. err = -EAGAIN;
  656. } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
  657. netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
  658. status, hw->adminq.sq_last_status);
  659. err = -EAGAIN;
  660. } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
  661. netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
  662. status, hw->adminq.sq_last_status);
  663. err = -EAGAIN;
  664. }
  665. if (!test_bit(__ICE_DOWN, pf->state)) {
  666. /* Give it a little more time to try to come back */
  667. msleep(75);
  668. if (!test_bit(__ICE_DOWN, pf->state))
  669. return ice_nway_reset(netdev);
  670. }
  671. return err;
  672. }
/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the RSS hash key size in bytes (the original comment said
 * "table size", a copy-paste from the indirection-table getter).
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}
/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}
  694. /**
  695. * ice_get_rxfh - get the rx flow hash indirection table
  696. * @netdev: network interface device structure
  697. * @indir: indirection table
  698. * @key: hash key
  699. * @hfunc: hash function
  700. *
  701. * Reads the indirection table directly from the hardware.
  702. */
  703. static int
  704. ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
  705. {
  706. struct ice_netdev_priv *np = netdev_priv(netdev);
  707. struct ice_vsi *vsi = np->vsi;
  708. struct ice_pf *pf = vsi->back;
  709. int ret = 0, i;
  710. u8 *lut;
  711. if (hfunc)
  712. *hfunc = ETH_RSS_HASH_TOP;
  713. if (!indir)
  714. return 0;
  715. if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
  716. /* RSS not supported return error here */
  717. netdev_warn(netdev, "RSS is not configured on this VSI!\n");
  718. return -EIO;
  719. }
  720. lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
  721. if (!lut)
  722. return -ENOMEM;
  723. if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
  724. ret = -EIO;
  725. goto out;
  726. }
  727. for (i = 0; i < vsi->rss_table_size; i++)
  728. indir[i] = (u32)(lut[i]);
  729. out:
  730. devm_kfree(&pf->pdev->dev, lut);
  731. return ret;
  732. }
/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table (NULL means regenerate the default LUT)
 * @key: hash key (NULL means keep the current key)
 * @hfunc: hash function; only Toeplitz (or no change) is accepted
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table. The user-supplied key and LUT
 * are cached in vsi->rss_hkey_user / vsi->rss_lut_user (devm-allocated
 * once, reused on later calls) so they survive VSI rebuilds.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 *seed = NULL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (key) {
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(&pf->pdev->dev,
					     ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
		seed = vsi->rss_hkey_user;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
						 vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
	if (indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(indir[i]);
	} else {
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
		return -EIO;

	return 0;
}
/* ethtool operations supported by the ice driver */
static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings = ice_get_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = ice_get_strings,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
};
/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}