/* i40e_ethtool.c */
  1. /*******************************************************************************
  2. *
  3. * Intel Ethernet Controller XL710 Family Linux Driver
  4. * Copyright(c) 2013 - 2014 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along
  16. * with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * The full GNU General Public License is included in this distribution in
  19. * the file called "COPYING".
  20. *
  21. * Contact Information:
  22. * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24. *
  25. ******************************************************************************/
  26. /* ethtool support for i40e */
  27. #include "i40e.h"
  28. #include "i40e_diag.h"
/* Describes one ethtool statistic: the name shown by "ethtool -S" plus
 * the size and byte offset of the backing counter inside the structure
 * that owns it (net_device_stats, i40e_pf, or i40e_vsi).
 */
struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;	/* sizeof() the counter field (u32 or u64) */
	int stat_offset;	/* offsetof() the field in its owner struct */
};
/* Build an i40e_stats entry for field _stat of structure type _type,
 * displayed under the name _name.
 */
#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
/* Counter taken from the standard struct net_device_stats */
#define I40E_NETDEV_STAT(_net_stat) \
		I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
/* Counter taken from the physical function (struct i40e_pf) */
#define I40E_PF_STAT(_name, _stat) \
		I40E_STAT(struct i40e_pf, _name, _stat)
/* Counter taken from a VSI (struct i40e_vsi) */
#define I40E_VSI_STAT(_name, _stat) \
		I40E_STAT(struct i40e_vsi, _name, _stat)
/* Per-netdev statistics, read out of the VSI's rtnl_link_stats64 in
 * i40e_get_ethtool_stats(); reported for every i40e netdev.
 */
static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(multicast),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they are separate. This device supports Virtualization, and
 * as such might have several netdevs supporting VMDq and FCoE going
 * through a single port. The NETDEV_STATs are for individual netdevs
 * seen at the top of the stack, and the PF_STATs are for the physical
 * function at the bottom of the stack hosting those netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
 */
static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	/* packet-size histogram counters maintained by the MAC */
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
	I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
  108. #define I40E_QUEUE_STATS_LEN(n) \
  109. ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
  110. ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
  111. #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
  112. #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
  113. #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
  114. I40E_QUEUE_STATS_LEN((n)))
  115. #define I40E_PFC_STATS_LEN ( \
  116. (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
  117. FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
  118. FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
  119. FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
  120. FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
  121. / sizeof(u64))
  122. #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
  123. I40E_PFC_STATS_LEN + \
  124. I40E_VSI_STATS_LEN((n)))
/* Indices into the ethtool self-test result array; must stay in sync
 * with the i40e_gstrings_test[] string table below.
 */
enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LOOPBACK,
	I40E_ETH_TEST_LINK,
};
/* Names shown by "ethtool -t"; order matches enum i40e_ethtool_test_id */
static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"Eeprom test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)"
};

#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
/**
 * i40e_get_settings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Reports speed/duplex settings based on media_type
 **/
static int i40e_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	u32 link_speed = hw_link_info->link_speed;

	/* hardware is either in 40G mode or 10G mode
	 * NOTE: this section initializes supported and advertising
	 */
	switch (hw_link_info->phy_type) {
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ecmd->supported = SUPPORTED_40000baseCR4_Full;
		ecmd->advertising = ADVERTISED_40000baseCR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ecmd->supported = SUPPORTED_40000baseKR4_Full;
		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ecmd->supported = SUPPORTED_40000baseSR4_Full;
		ecmd->advertising = ADVERTISED_40000baseSR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ecmd->supported = SUPPORTED_40000baseLR4_Full;
		ecmd->advertising = ADVERTISED_40000baseLR4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ecmd->supported = SUPPORTED_10000baseKX4_Full;
		ecmd->advertising = ADVERTISED_10000baseKX4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ecmd->supported = SUPPORTED_10000baseKR_Full;
		ecmd->advertising = ADVERTISED_10000baseKR_Full;
		break;
	default:
		/* unrecognized PHY type: fall back on the device family */
		if (i40e_is_40G_device(hw->device_id)) {
			ecmd->supported = SUPPORTED_40000baseSR4_Full;
			ecmd->advertising = ADVERTISED_40000baseSR4_Full;
		} else {
			ecmd->supported = SUPPORTED_10000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full;
		}
		break;
	}

	ecmd->supported |= SUPPORTED_Autoneg;
	ecmd->advertising |= ADVERTISED_Autoneg;
	/* NOTE(review): autoneg "enabled" is derived from autonegotiation
	 * having COMPLETED, not from how the link is configured — confirm
	 * this matches the intended ethtool semantics.
	 */
	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
			  AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* map the HW media type onto the ethtool port/connector type */
	switch (hw->phy.media_type) {
	case I40E_MEDIA_TYPE_BACKPLANE:
		ecmd->supported |= SUPPORTED_Backplane;
		ecmd->advertising |= ADVERTISED_Backplane;
		ecmd->port = PORT_NONE;
		break;
	case I40E_MEDIA_TYPE_BASET:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case I40E_MEDIA_TYPE_DA:
	case I40E_MEDIA_TYPE_CX4:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_DA;
		break;
	case I40E_MEDIA_TYPE_FIBER:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case I40E_MEDIA_TYPE_UNKNOWN:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	ecmd->transceiver = XCVR_EXTERNAL;

	if (link_up) {
		switch (link_speed) {
		case I40E_LINK_SPEED_40GB:
			/* need a SPEED_40000 in ethtool.h */
			ethtool_cmd_speed_set(ecmd, 40000);
			break;
		case I40E_LINK_SPEED_10GB:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		/* no link: speed and duplex are unknown by definition */
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
  246. /**
  247. * i40e_get_pauseparam - Get Flow Control status
  248. * Return tx/rx-pause status
  249. **/
  250. static void i40e_get_pauseparam(struct net_device *netdev,
  251. struct ethtool_pauseparam *pause)
  252. {
  253. struct i40e_netdev_priv *np = netdev_priv(netdev);
  254. struct i40e_pf *pf = np->vsi->back;
  255. struct i40e_hw *hw = &pf->hw;
  256. struct i40e_link_status *hw_link_info = &hw->phy.link_info;
  257. pause->autoneg =
  258. ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
  259. AUTONEG_ENABLE : AUTONEG_DISABLE);
  260. if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
  261. pause->rx_pause = 1;
  262. } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
  263. pause->tx_pause = 1;
  264. } else if (hw->fc.current_mode == I40E_FC_FULL) {
  265. pause->rx_pause = 1;
  266. pause->tx_pause = 1;
  267. }
  268. }
  269. static u32 i40e_get_msglevel(struct net_device *netdev)
  270. {
  271. struct i40e_netdev_priv *np = netdev_priv(netdev);
  272. struct i40e_pf *pf = np->vsi->back;
  273. return pf->msg_enable;
  274. }
  275. static void i40e_set_msglevel(struct net_device *netdev, u32 data)
  276. {
  277. struct i40e_netdev_priv *np = netdev_priv(netdev);
  278. struct i40e_pf *pf = np->vsi->back;
  279. if (I40E_DEBUG_USER & data)
  280. pf->hw.debug_mask = data;
  281. pf->msg_enable = data;
  282. }
  283. static int i40e_get_regs_len(struct net_device *netdev)
  284. {
  285. int reg_count = 0;
  286. int i;
  287. for (i = 0; i40e_reg_list[i].offset != 0; i++)
  288. reg_count += i40e_reg_list[i].elements;
  289. return reg_count * sizeof(u32);
  290. }
  291. static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
  292. void *p)
  293. {
  294. struct i40e_netdev_priv *np = netdev_priv(netdev);
  295. struct i40e_pf *pf = np->vsi->back;
  296. struct i40e_hw *hw = &pf->hw;
  297. u32 *reg_buf = p;
  298. int i, j, ri;
  299. u32 reg;
  300. /* Tell ethtool which driver-version-specific regs output we have.
  301. *
  302. * At some point, if we have ethtool doing special formatting of
  303. * this data, it will rely on this version number to know how to
  304. * interpret things. Hence, this needs to be updated if/when the
  305. * diags register table is changed.
  306. */
  307. regs->version = 1;
  308. /* loop through the diags reg table for what to print */
  309. ri = 0;
  310. for (i = 0; i40e_reg_list[i].offset != 0; i++) {
  311. for (j = 0; j < i40e_reg_list[i].elements; j++) {
  312. reg = i40e_reg_list[i].offset
  313. + (j * i40e_reg_list[i].stride);
  314. reg_buf[ri++] = rd32(hw, reg);
  315. }
  316. }
  317. }
  318. static int i40e_get_eeprom(struct net_device *netdev,
  319. struct ethtool_eeprom *eeprom, u8 *bytes)
  320. {
  321. struct i40e_netdev_priv *np = netdev_priv(netdev);
  322. struct i40e_hw *hw = &np->vsi->back->hw;
  323. struct i40e_pf *pf = np->vsi->back;
  324. int ret_val = 0, len;
  325. u8 *eeprom_buff;
  326. u16 i, sectors;
  327. bool last;
  328. #define I40E_NVM_SECTOR_SIZE 4096
  329. if (eeprom->len == 0)
  330. return -EINVAL;
  331. eeprom->magic = hw->vendor_id | (hw->device_id << 16);
  332. eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
  333. if (!eeprom_buff)
  334. return -ENOMEM;
  335. ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  336. if (ret_val) {
  337. dev_info(&pf->pdev->dev,
  338. "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
  339. ret_val, hw->aq.asq_last_status);
  340. goto free_buff;
  341. }
  342. sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
  343. sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
  344. len = I40E_NVM_SECTOR_SIZE;
  345. last = false;
  346. for (i = 0; i < sectors; i++) {
  347. if (i == (sectors - 1)) {
  348. len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
  349. last = true;
  350. }
  351. ret_val = i40e_aq_read_nvm(hw, 0x0,
  352. eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
  353. len,
  354. (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
  355. last, NULL);
  356. if (ret_val) {
  357. dev_info(&pf->pdev->dev,
  358. "read NVM failed err=%d status=0x%x\n",
  359. ret_val, hw->aq.asq_last_status);
  360. goto release_nvm;
  361. }
  362. }
  363. release_nvm:
  364. i40e_release_nvm(hw);
  365. memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
  366. free_buff:
  367. kfree(eeprom_buff);
  368. return ret_val;
  369. }
  370. static int i40e_get_eeprom_len(struct net_device *netdev)
  371. {
  372. struct i40e_netdev_priv *np = netdev_priv(netdev);
  373. struct i40e_hw *hw = &np->vsi->back->hw;
  374. u32 val;
  375. val = (rd32(hw, I40E_GLPCI_LBARCTRL)
  376. & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
  377. >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
  378. /* register returns value in power of 2, 64Kbyte chunks. */
  379. val = (64 * 1024) * (1 << val);
  380. return val;
  381. }
  382. static void i40e_get_drvinfo(struct net_device *netdev,
  383. struct ethtool_drvinfo *drvinfo)
  384. {
  385. struct i40e_netdev_priv *np = netdev_priv(netdev);
  386. struct i40e_vsi *vsi = np->vsi;
  387. struct i40e_pf *pf = vsi->back;
  388. strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
  389. strlcpy(drvinfo->version, i40e_driver_version_str,
  390. sizeof(drvinfo->version));
  391. strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
  392. sizeof(drvinfo->fw_version));
  393. strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
  394. sizeof(drvinfo->bus_info));
  395. }
  396. static void i40e_get_ringparam(struct net_device *netdev,
  397. struct ethtool_ringparam *ring)
  398. {
  399. struct i40e_netdev_priv *np = netdev_priv(netdev);
  400. struct i40e_pf *pf = np->vsi->back;
  401. struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
  402. ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
  403. ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
  404. ring->rx_mini_max_pending = 0;
  405. ring->rx_jumbo_max_pending = 0;
  406. ring->rx_pending = vsi->rx_rings[0]->count;
  407. ring->tx_pending = vsi->tx_rings[0]->count;
  408. ring->rx_mini_pending = 0;
  409. ring->rx_jumbo_pending = 0;
  410. }
/**
 * i40e_set_ringparam - resize the Tx/Rx descriptor rings
 * @netdev: network interface device structure
 * @ring: requested ring sizes from ethtool
 *
 * Validates the request, then either records the new counts (interface
 * down) or allocates replacement ring resources, briefly downs the
 * interface, swaps the new resources in, and brings it back up.  The
 * ring structs themselves are kept in place because the MSI-X ISRs
 * hold pointers to them; only their contents are replaced.
 *
 * Returns 0 on success, -EINVAL on a bad request, -ENOMEM on
 * allocation failure.
 **/
static int i40e_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	/* mini/jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
		netdev_info(netdev,
			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			    ring->tx_pending, ring->rx_pending,
			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
		return -EINVAL;
	}

	/* round up to the descriptor-count granularity the HW requires */
	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0]->count) &&
	    (new_rx_count == vsi->rx_rings[0]->count))
		return 0;

	/* serialize against other reconfiguration paths */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);

	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i]->count = new_tx_count;
			vsi->rx_rings[i]->count = new_rx_count;
		}
		goto done;
	}

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */

	/* alloc updated Tx resources */
	if (new_tx_count != vsi->tx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0]->count, new_tx_count);
		tx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
			err = -ENOMEM;
			goto done;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			tx_rings[i] = *vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
				/* unwind the rings allocated so far */
				while (i) {
					i--;
					i40e_free_tx_resources(&tx_rings[i]);
				}
				kfree(tx_rings);
				tx_rings = NULL;

				goto done;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0]->count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
			err = -ENOMEM;
			goto free_tx;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			rx_rings[i] = *vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err) {
				/* unwind the rings allocated so far */
				while (i) {
					i--;
					i40e_free_rx_resources(&rx_rings[i]);
				}
				kfree(rx_rings);
				rx_rings = NULL;

				goto free_tx;
			}
		}
	}

	/* Bring interface down, copy in the new ring info,
	 * then restore the interface
	 */
	i40e_down(vsi);

	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_tx_resources(vsi->tx_rings[i]);
			*vsi->tx_rings[i] = tx_rings[i];
		}
		kfree(tx_rings);
		tx_rings = NULL;
	}

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(vsi->rx_rings[i]);
			*vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
		rx_rings = NULL;
	}

	i40e_up(vsi);

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			i40e_free_tx_resources(&tx_rings[i]);
		kfree(tx_rings);
		tx_rings = NULL;
	}

done:
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);

	return err;
}
  539. static int i40e_get_sset_count(struct net_device *netdev, int sset)
  540. {
  541. struct i40e_netdev_priv *np = netdev_priv(netdev);
  542. struct i40e_vsi *vsi = np->vsi;
  543. struct i40e_pf *pf = vsi->back;
  544. switch (sset) {
  545. case ETH_SS_TEST:
  546. return I40E_TEST_LEN;
  547. case ETH_SS_STATS:
  548. if (vsi == pf->vsi[pf->lan_vsi])
  549. return I40E_PF_STATS_LEN(netdev);
  550. else
  551. return I40E_VSI_STATS_LEN(netdev);
  552. default:
  553. return -EOPNOTSUPP;
  554. }
  555. }
/**
 * i40e_get_ethtool_stats - fill the "ethtool -S" data buffer
 * @netdev: network interface device structure
 * @stats: ethtool stats command header (unused here)
 * @data: output array; its layout must match i40e_get_strings()
 *        exactly: netdev stats, then 4 counters per queue pair, then
 *        (main LAN VSI only) port stats and per-priority FC stats
 **/
static void i40e_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i = 0;
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
	unsigned int start;

	i40e_update_stats(vsi);

	/* netdev-level counters; each entry records whether the backing
	 * field is u64 or u32 so it can be widened correctly
	 */
	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-queue counters: read under RCU with the u64_stats seqcount
	 * retry loop so 64-bit values are consistent on 32-bit hosts
	 */
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
		struct i40e_ring *rx_ring;

		if (!tx_ring)
			continue;

		/* process Tx ring statistics */
		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			data[i] = tx_ring->stats.packets;
			data[i + 1] = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		/* Rx ring is the 2nd half of the queue pair */
		rx_ring = &tx_ring[1];
		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			data[i + 2] = rx_ring->stats.packets;
			data[i + 3] = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
	}
	rcu_read_unlock();

	/* port-level stats are appended only on the base PF netdev */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_tx[j];
			data[i++] = pf->stats.priority_xoff_tx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_rx[j];
			data[i++] = pf->stats.priority_xoff_rx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
			data[i++] = pf->stats.priority_xon_2_xoff[j];
	}
}
/**
 * i40e_get_strings - fill the string tables for ethtool string sets
 * @netdev: network interface device structure
 * @stringset: which set is requested (ETH_SS_TEST or ETH_SS_STATS)
 * @data: output buffer, ETH_GSTRING_LEN bytes per string
 *
 * The ETH_SS_STATS ordering here must exactly mirror the value
 * ordering produced by i40e_get_ethtool_stats().
 **/
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < I40E_TEST_LEN; i++) {
			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		/* netdev-level stat names */
		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_net_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		/* four names per queue pair, matching the stats fill order */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* port-level names only on the base PF netdev */
		if (vsi == pf->vsi[pf->lan_vsi]) {
			for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
				snprintf(p, ETH_GSTRING_LEN, "port.%s",
					 i40e_gstrings_stats[i].stat_string);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon_2_xoff", i);
				p += ETH_GSTRING_LEN;
			}
		}
		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
  675. static int i40e_get_ts_info(struct net_device *dev,
  676. struct ethtool_ts_info *info)
  677. {
  678. struct i40e_pf *pf = i40e_netdev_to_pf(dev);
  679. info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
  680. SOF_TIMESTAMPING_RX_SOFTWARE |
  681. SOF_TIMESTAMPING_SOFTWARE |
  682. SOF_TIMESTAMPING_TX_HARDWARE |
  683. SOF_TIMESTAMPING_RX_HARDWARE |
  684. SOF_TIMESTAMPING_RAW_HARDWARE;
  685. if (pf->ptp_clock)
  686. info->phc_index = ptp_clock_index(pf->ptp_clock);
  687. else
  688. info->phc_index = -1;
  689. info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
  690. info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
  691. (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
  692. (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
  693. (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
  694. (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
  695. (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
  696. (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
  697. (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
  698. (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
  699. (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
  700. (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
  701. (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
  702. return 0;
  703. }
  704. static int i40e_link_test(struct net_device *netdev, u64 *data)
  705. {
  706. struct i40e_netdev_priv *np = netdev_priv(netdev);
  707. struct i40e_pf *pf = np->vsi->back;
  708. netif_info(pf, hw, netdev, "link test\n");
  709. if (i40e_get_link_status(&pf->hw))
  710. *data = 0;
  711. else
  712. *data = 1;
  713. return *data;
  714. }
  715. static int i40e_reg_test(struct net_device *netdev, u64 *data)
  716. {
  717. struct i40e_netdev_priv *np = netdev_priv(netdev);
  718. struct i40e_pf *pf = np->vsi->back;
  719. netif_info(pf, hw, netdev, "register test\n");
  720. *data = i40e_diag_reg_test(&pf->hw);
  721. return *data;
  722. }
  723. static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
  724. {
  725. struct i40e_netdev_priv *np = netdev_priv(netdev);
  726. struct i40e_pf *pf = np->vsi->back;
  727. netif_info(pf, hw, netdev, "eeprom test\n");
  728. *data = i40e_diag_eeprom_test(&pf->hw);
  729. return *data;
  730. }
/* Ethtool self-test helper: fire a software interrupt and verify that
 * the PF's software interrupt counter advances.  *data is 0 on pass,
 * 1 on fail.
 */
static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	u16 swc_old = pf->sw_int_count;	/* snapshot counter before trigger */

	netif_info(pf, hw, netdev, "interrupt test\n");
	/* enable the interrupt and request a software-triggered one */
	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
	/* give the interrupt time to fire and be counted by the handler */
	usleep_range(1000, 2000);
	/* counter unchanged => the interrupt never arrived => failure */
	*data = (swc_old == pf->sw_int_count);

	return *data;
}
  744. static int i40e_loopback_test(struct net_device *netdev, u64 *data)
  745. {
  746. struct i40e_netdev_priv *np = netdev_priv(netdev);
  747. struct i40e_pf *pf = np->vsi->back;
  748. netif_info(pf, hw, netdev, "loopback test not implemented\n");
  749. *data = 0;
  750. return *data;
  751. }
/**
 * i40e_diag_test - run the ethtool self-test suite
 * @netdev: network interface device structure
 * @eth_test: test request (online vs offline) and result flags
 * @data: per-test result array; a nonzero entry indicates failure
 *
 * Offline mode runs every test and finishes with a PF reset (required
 * after the register test).  Online mode only checks link and marks
 * the offline-only tests as passed.
 **/
static void i40e_diag_test(struct net_device *netdev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		netif_info(pf, drv, netdev, "offline testing starting\n");

		/* flag the PF as under test so other code leaves it alone */
		set_bit(__I40E_TESTING, &pf->state);

		/* Link test performed before hardware reset
		 * so autoneg doesn't interfere with test result
		 */
		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* run reg test last, a reset is required after it */
		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__I40E_TESTING, &pf->state);
		/* the register test leaves the device needing a PF reset */
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
	} else {
		/* Online tests */
		netif_info(pf, drv, netdev, "online testing starting\n");

		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[I40E_ETH_TEST_REG] = 0;
		data[I40E_ETH_TEST_EEPROM] = 0;
		data[I40E_ETH_TEST_INTR] = 0;
		data[I40E_ETH_TEST_LOOPBACK] = 0;
	}

	netif_info(pf, drv, netdev, "testing finished\n");
}
  790. static void i40e_get_wol(struct net_device *netdev,
  791. struct ethtool_wolinfo *wol)
  792. {
  793. struct i40e_netdev_priv *np = netdev_priv(netdev);
  794. struct i40e_pf *pf = np->vsi->back;
  795. struct i40e_hw *hw = &pf->hw;
  796. u16 wol_nvm_bits;
  797. /* NVM bit on means WoL disabled for the port */
  798. i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
  799. if ((1 << hw->port) & wol_nvm_bits) {
  800. wol->supported = 0;
  801. wol->wolopts = 0;
  802. } else {
  803. wol->supported = WAKE_MAGIC;
  804. wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
  805. }
  806. }
  807. static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
  808. {
  809. struct i40e_netdev_priv *np = netdev_priv(netdev);
  810. struct i40e_pf *pf = np->vsi->back;
  811. struct i40e_hw *hw = &pf->hw;
  812. u16 wol_nvm_bits;
  813. /* NVM bit on means WoL disabled for the port */
  814. i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
  815. if (((1 << hw->port) & wol_nvm_bits))
  816. return -EOPNOTSUPP;
  817. /* only magic packet is supported */
  818. if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
  819. return -EOPNOTSUPP;
  820. /* is this a new value? */
  821. if (pf->wol_en != !!wol->wolopts) {
  822. pf->wol_en = !!wol->wolopts;
  823. device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
  824. }
  825. return 0;
  826. }
  827. static int i40e_nway_reset(struct net_device *netdev)
  828. {
  829. /* restart autonegotiation */
  830. struct i40e_netdev_priv *np = netdev_priv(netdev);
  831. struct i40e_pf *pf = np->vsi->back;
  832. struct i40e_hw *hw = &pf->hw;
  833. i40e_status ret = 0;
  834. ret = i40e_aq_set_link_restart_an(hw, NULL);
  835. if (ret) {
  836. netdev_info(netdev, "link restart failed, aq_err=%d\n",
  837. pf->hw.aq.asq_last_status);
  838. return -EIO;
  839. }
  840. return 0;
  841. }
  842. static int i40e_set_phys_id(struct net_device *netdev,
  843. enum ethtool_phys_id_state state)
  844. {
  845. struct i40e_netdev_priv *np = netdev_priv(netdev);
  846. struct i40e_pf *pf = np->vsi->back;
  847. struct i40e_hw *hw = &pf->hw;
  848. int blink_freq = 2;
  849. switch (state) {
  850. case ETHTOOL_ID_ACTIVE:
  851. pf->led_status = i40e_led_get(hw);
  852. return blink_freq;
  853. case ETHTOOL_ID_ON:
  854. i40e_led_set(hw, 0xF, false);
  855. break;
  856. case ETHTOOL_ID_OFF:
  857. i40e_led_set(hw, 0x0, false);
  858. break;
  859. case ETHTOOL_ID_INACTIVE:
  860. i40e_led_set(hw, pf->led_status, false);
  861. break;
  862. }
  863. return 0;
  864. }
/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
 * 125us (8000 interrupts per second) == ITR(62)
 */
  869. static int i40e_get_coalesce(struct net_device *netdev,
  870. struct ethtool_coalesce *ec)
  871. {
  872. struct i40e_netdev_priv *np = netdev_priv(netdev);
  873. struct i40e_vsi *vsi = np->vsi;
  874. ec->tx_max_coalesced_frames_irq = vsi->work_limit;
  875. ec->rx_max_coalesced_frames_irq = vsi->work_limit;
  876. if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
  877. ec->rx_coalesce_usecs = 1;
  878. else
  879. ec->rx_coalesce_usecs = vsi->rx_itr_setting;
  880. if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
  881. ec->tx_coalesce_usecs = 1;
  882. else
  883. ec->tx_coalesce_usecs = vsi->tx_itr_setting;
  884. return 0;
  885. }
  886. static int i40e_set_coalesce(struct net_device *netdev,
  887. struct ethtool_coalesce *ec)
  888. {
  889. struct i40e_netdev_priv *np = netdev_priv(netdev);
  890. struct i40e_q_vector *q_vector;
  891. struct i40e_vsi *vsi = np->vsi;
  892. struct i40e_pf *pf = vsi->back;
  893. struct i40e_hw *hw = &pf->hw;
  894. u16 vector;
  895. int i;
  896. if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
  897. vsi->work_limit = ec->tx_max_coalesced_frames_irq;
  898. switch (ec->rx_coalesce_usecs) {
  899. case 0:
  900. vsi->rx_itr_setting = 0;
  901. break;
  902. case 1:
  903. vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
  904. ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
  905. break;
  906. default:
  907. if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
  908. (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
  909. return -EINVAL;
  910. vsi->rx_itr_setting = ec->rx_coalesce_usecs;
  911. break;
  912. }
  913. switch (ec->tx_coalesce_usecs) {
  914. case 0:
  915. vsi->tx_itr_setting = 0;
  916. break;
  917. case 1:
  918. vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
  919. ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
  920. break;
  921. default:
  922. if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
  923. (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
  924. return -EINVAL;
  925. vsi->tx_itr_setting = ec->tx_coalesce_usecs;
  926. break;
  927. }
  928. vector = vsi->base_vector;
  929. for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
  930. q_vector = vsi->q_vectors[i];
  931. q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
  932. wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
  933. q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
  934. wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
  935. i40e_flush(hw);
  936. }
  937. return 0;
  938. }
  939. /**
  940. * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
  941. * @pf: pointer to the physical function struct
  942. * @cmd: ethtool rxnfc command
  943. *
  944. * Returns Success if the flow is supported, else Invalid Input.
  945. **/
  946. static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
  947. {
  948. cmd->data = 0;
  949. /* Report default options for RSS on i40e */
  950. switch (cmd->flow_type) {
  951. case TCP_V4_FLOW:
  952. case UDP_V4_FLOW:
  953. cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  954. /* fall through to add IP fields */
  955. case SCTP_V4_FLOW:
  956. case AH_ESP_V4_FLOW:
  957. case AH_V4_FLOW:
  958. case ESP_V4_FLOW:
  959. case IPV4_FLOW:
  960. cmd->data |= RXH_IP_SRC | RXH_IP_DST;
  961. break;
  962. case TCP_V6_FLOW:
  963. case UDP_V6_FLOW:
  964. cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  965. /* fall through to add IP fields */
  966. case SCTP_V6_FLOW:
  967. case AH_ESP_V6_FLOW:
  968. case AH_V6_FLOW:
  969. case ESP_V6_FLOW:
  970. case IPV6_FLOW:
  971. cmd->data |= RXH_IP_SRC | RXH_IP_DST;
  972. break;
  973. default:
  974. return -EINVAL;
  975. }
  976. return 0;
  977. }
  978. /**
  979. * i40e_get_rxnfc - command to get RX flow classification rules
  980. * @netdev: network interface device structure
  981. * @cmd: ethtool rxnfc command
  982. *
  983. * Returns Success if the command is supported.
  984. **/
  985. static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
  986. u32 *rule_locs)
  987. {
  988. struct i40e_netdev_priv *np = netdev_priv(netdev);
  989. struct i40e_vsi *vsi = np->vsi;
  990. struct i40e_pf *pf = vsi->back;
  991. int ret = -EOPNOTSUPP;
  992. switch (cmd->cmd) {
  993. case ETHTOOL_GRXRINGS:
  994. cmd->data = vsi->alloc_queue_pairs;
  995. ret = 0;
  996. break;
  997. case ETHTOOL_GRXFH:
  998. ret = i40e_get_rss_hash_opts(pf, cmd);
  999. break;
  1000. case ETHTOOL_GRXCLSRLCNT:
  1001. cmd->rule_cnt = 10;
  1002. ret = 0;
  1003. break;
  1004. case ETHTOOL_GRXCLSRULE:
  1005. ret = 0;
  1006. break;
  1007. case ETHTOOL_GRXCLSRLALL:
  1008. cmd->data = 500;
  1009. ret = 0;
  1010. default:
  1011. break;
  1012. }
  1013. return ret;
  1014. }
/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
 *
 * Returns Success if the flow input set is supported.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	/* current hash-enable (HENA) bitmap, split across two 32-bit regs */
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		/* L4 port hashing is all-or-nothing: both port halves or
		 * neither; any other combination is rejected
		 */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		/* UDP toggles unicast, multicast, and fragmented-IPv4
		 * packet classifier types together
		 */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* these flow types have no port fields to hash on */
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

	/* write the updated bitmap back to both register halves */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required from the FDir descriptor
 * @fsp: the flow spec
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_data *fd_data,
				   struct ethtool_rx_flow_spec *fsp, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	int ret;
	int i;

	/* Dummy Ethernet + IPv4 (proto 0x11 = UDP) + UDP frame; the
	 * addresses and ports are patched in from the flow spec below.
	 */
	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
			 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	/* locate the IP and UDP headers inside the raw packet buffer */
	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	/* patch in the user-supplied addresses and ports */
	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
	udp->source = fsp->h_u.tcp_ip4_spec.psrc;
	udp->dest = fsp->h_u.tcp_ip4_spec.pdst;

	/* program the filter once per UDP-capable packet classifier type */
	for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
			err = true;
		} else {
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
		}
	}

	/* report failure if any PCTYPE could not be programmed */
	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required from the FDir descriptor
 * @fsp: the flow spec
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_data *fd_data,
				   struct ethtool_rx_flow_spec *fsp, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	int ret;

	/* Dummy packet: Ethernet + IPv4 (proto 0x6 = TCP) + TCP header;
	 * addresses and ports are patched in from the flow spec below.
	 */
	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
			 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};

	memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	/* locate the IP and TCP headers inside the raw packet buffer */
	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	/* patch in the user-supplied addresses and ports */
	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;

	if (add) {
		/* sideband TCP rules conflict with ATR, so disable it */
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	/* program both the SYN and the regular TCP classifier types */
	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
	ret = i40e_program_fdir_filter(fd_data, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;

	ret = i40e_program_fdir_filter(fd_data, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	/* report failure if either PCTYPE could not be programmed */
	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required from the FDir descriptor
 * @fsp: the flow spec
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_data *fd_data,
				    struct ethtool_rx_flow_spec *fsp, bool add)
{
	/* SCTP flow director filters are not implemented yet */
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @fsp: the ethtool flow spec
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_data *fd_data,
				  struct ethtool_rx_flow_spec *fsp, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	int ret;
	int i;

	/* Dummy Ethernet + bare IPv4 frame; addresses and protocol are
	 * patched in from the user flow spec below.
	 */
	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
			 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);

	/* patch in the user-supplied addresses and protocol */
	ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
	ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
	ip->protocol = fsp->h_u.usr_ip4_spec.proto;

	/* program the filter for each non-L4 IPv4 classifier type */
	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
			err = true;
		} else {
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
		}
	}

	/* report failure if any PCTYPE could not be programmed */
	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
 * a specific flow spec based on their protocol
 * @vsi: pointer to the targeted VSI
 * @cmd: command to get or set RX flow classification rules
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
				     struct ethtool_rxnfc *cmd, bool add)
{
	struct i40e_fdir_data fd_data;
	int ret = -EINVAL;
	struct i40e_pf *pf;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;

	/* NOTE(review): RX_CLS_FLOW_DISC (drop) passes this check but
	 * dest_ctl below still steers to a queue index — confirm drop
	 * rules are intended to be handled this way.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= vsi->num_queue_pairs))
		return -EINVAL;

	/* Populate the Flow Director that we have at the moment
	 * and allocate the raw packet buffer for the calling functions
	 */
	fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
				     GFP_KERNEL);

	if (!fd_data.raw_packet) {
		dev_info(&pf->pdev->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	/* common descriptor fields shared by all protocol handlers */
	fd_data.q_index = fsp->ring_cookie;
	fd_data.flex_off = 0;
	fd_data.pctype = 0;
	fd_data.dest_vsi = vsi->id;
	fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
	fd_data.cnt_index = 0;
	fd_data.fd_id = 0;

	/* dispatch on the flow type, falling back to the generic IPv4
	 * handler for unrecognized user-specified protocols
	 */
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type\n");
		ret = -EINVAL;
	}

	kfree(fd_data.raw_packet);
	fd_data.raw_packet = NULL;

	return ret;
}
  1385. /**
  1386. * i40e_set_rxnfc - command to set RX flow classification rules
  1387. * @netdev: network interface device structure
  1388. * @cmd: ethtool rxnfc command
  1389. *
  1390. * Returns Success if the command is supported.
  1391. **/
  1392. static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
  1393. {
  1394. struct i40e_netdev_priv *np = netdev_priv(netdev);
  1395. struct i40e_vsi *vsi = np->vsi;
  1396. struct i40e_pf *pf = vsi->back;
  1397. int ret = -EOPNOTSUPP;
  1398. switch (cmd->cmd) {
  1399. case ETHTOOL_SRXFH:
  1400. ret = i40e_set_rss_hash_opt(pf, cmd);
  1401. break;
  1402. case ETHTOOL_SRXCLSRLINS:
  1403. ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
  1404. break;
  1405. case ETHTOOL_SRXCLSRLDEL:
  1406. ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
  1407. break;
  1408. default:
  1409. break;
  1410. }
  1411. return ret;
  1412. }
/**
 * i40e_max_channels - get Max number of combined channels supported
 * @vsi: vsi pointer
 *
 * Returns the number of queue pairs allocated to the VSI.
 **/
static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
{
	/* TODO: This code assumes DCB and FD is disabled for now. */
	return vsi->alloc_queue_pairs;
}
  1422. /**
  1423. * i40e_get_channels - Get the current channels enabled and max supported etc.
  1424. * @netdev: network interface device structure
  1425. * @ch: ethtool channels structure
  1426. *
  1427. * We don't support separate tx and rx queues as channels. The other count
  1428. * represents how many queues are being used for control. max_combined counts
  1429. * how many queue pairs we can support. They may not be mapped 1 to 1 with
  1430. * q_vectors since we support a lot more queue pairs than q_vectors.
  1431. **/
  1432. static void i40e_get_channels(struct net_device *dev,
  1433. struct ethtool_channels *ch)
  1434. {
  1435. struct i40e_netdev_priv *np = netdev_priv(dev);
  1436. struct i40e_vsi *vsi = np->vsi;
  1437. struct i40e_pf *pf = vsi->back;
  1438. /* report maximum channels */
  1439. ch->max_combined = i40e_max_channels(vsi);
  1440. /* report info for other vector */
  1441. ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
  1442. ch->max_other = ch->other_count;
  1443. /* Note: This code assumes DCB is disabled for now. */
  1444. ch->combined_count = vsi->num_queue_pairs;
  1445. }
  1446. /**
  1447. * i40e_set_channels - Set the new channels count.
  1448. * @netdev: network interface device structure
  1449. * @ch: ethtool channels structure
  1450. *
  1451. * The new channels count may not be the same as requested by the user
  1452. * since it gets rounded down to a power of 2 value.
  1453. **/
  1454. static int i40e_set_channels(struct net_device *dev,
  1455. struct ethtool_channels *ch)
  1456. {
  1457. struct i40e_netdev_priv *np = netdev_priv(dev);
  1458. unsigned int count = ch->combined_count;
  1459. struct i40e_vsi *vsi = np->vsi;
  1460. struct i40e_pf *pf = vsi->back;
  1461. int new_count;
  1462. /* We do not support setting channels for any other VSI at present */
  1463. if (vsi->type != I40E_VSI_MAIN)
  1464. return -EINVAL;
  1465. /* verify they are not requesting separate vectors */
  1466. if (!count || ch->rx_count || ch->tx_count)
  1467. return -EINVAL;
  1468. /* verify other_count has not changed */
  1469. if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
  1470. return -EINVAL;
  1471. /* verify the number of channels does not exceed hardware limits */
  1472. if (count > i40e_max_channels(vsi))
  1473. return -EINVAL;
  1474. /* update feature limits from largest to smallest supported values */
  1475. /* TODO: Flow director limit, DCB etc */
  1476. /* cap RSS limit */
  1477. if (count > pf->rss_size_max)
  1478. count = pf->rss_size_max;
  1479. /* use rss_reconfig to rebuild with new queue count and update traffic
  1480. * class queue mapping
  1481. */
  1482. new_count = i40e_reconfig_rss_queues(pf, count);
  1483. if (new_count > 0)
  1484. return 0;
  1485. else
  1486. return -EINVAL;
  1487. }
/* ethtool callback table for i40e PF netdevs */
static const struct ethtool_ops i40e_ethtool_ops = {
	.get_settings = i40e_get_settings,
	.get_drvinfo = i40e_get_drvinfo,
	.get_regs_len = i40e_get_regs_len,
	.get_regs = i40e_get_regs,
	.nway_reset = i40e_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_wol = i40e_get_wol,
	.set_wol = i40e_set_wol,
	.get_eeprom_len = i40e_get_eeprom_len,
	.get_eeprom = i40e_get_eeprom,
	.get_ringparam = i40e_get_ringparam,
	.set_ringparam = i40e_set_ringparam,
	.get_pauseparam = i40e_get_pauseparam,
	.get_msglevel = i40e_get_msglevel,
	.set_msglevel = i40e_set_msglevel,
	.get_rxnfc = i40e_get_rxnfc,
	.set_rxnfc = i40e_set_rxnfc,
	.self_test = i40e_diag_test,
	.get_strings = i40e_get_strings,
	.set_phys_id = i40e_set_phys_id,
	.get_sset_count = i40e_get_sset_count,
	.get_ethtool_stats = i40e_get_ethtool_stats,
	.get_coalesce = i40e_get_coalesce,
	.set_coalesce = i40e_set_coalesce,
	.get_channels = i40e_get_channels,
	.set_channels = i40e_set_channels,
	.get_ts_info = i40e_get_ts_info,
};
/**
 * i40e_set_ethtool_ops - attach the i40e ethtool operations to a netdev
 * @netdev: network interface device structure
 **/
void i40e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
}