qede_ethtool.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294
  1. /* QLogic qede NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/version.h>
  9. #include <linux/types.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/etherdevice.h>
  12. #include <linux/ethtool.h>
  13. #include <linux/string.h>
  14. #include <linux/pci.h>
  15. #include <linux/capability.h>
  16. #include "qede.h"
  17. #define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
  18. #define QEDE_STAT_STRING(stat_name) (#stat_name)
  19. #define _QEDE_STAT(stat_name, pf_only) \
  20. {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
  21. #define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
  22. #define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
  23. #define QEDE_RQSTAT_OFFSET(stat_name) \
  24. (offsetof(struct qede_rx_queue, stat_name))
  25. #define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
  26. #define QEDE_RQSTAT(stat_name) \
  27. {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
  28. #define QEDE_SELFTEST_POLL_COUNT 100
  29. static const struct {
  30. u64 offset;
  31. char string[ETH_GSTRING_LEN];
  32. } qede_rqstats_arr[] = {
  33. QEDE_RQSTAT(rx_hw_errors),
  34. QEDE_RQSTAT(rx_alloc_errors),
  35. QEDE_RQSTAT(rx_ip_frags),
  36. };
  37. #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
  38. #define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
  39. (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
  40. qede_rqstats_arr[(sindex)].offset)))
  41. static const struct {
  42. u64 offset;
  43. char string[ETH_GSTRING_LEN];
  44. bool pf_only;
  45. } qede_stats_arr[] = {
  46. QEDE_STAT(rx_ucast_bytes),
  47. QEDE_STAT(rx_mcast_bytes),
  48. QEDE_STAT(rx_bcast_bytes),
  49. QEDE_STAT(rx_ucast_pkts),
  50. QEDE_STAT(rx_mcast_pkts),
  51. QEDE_STAT(rx_bcast_pkts),
  52. QEDE_STAT(tx_ucast_bytes),
  53. QEDE_STAT(tx_mcast_bytes),
  54. QEDE_STAT(tx_bcast_bytes),
  55. QEDE_STAT(tx_ucast_pkts),
  56. QEDE_STAT(tx_mcast_pkts),
  57. QEDE_STAT(tx_bcast_pkts),
  58. QEDE_PF_STAT(rx_64_byte_packets),
  59. QEDE_PF_STAT(rx_65_to_127_byte_packets),
  60. QEDE_PF_STAT(rx_128_to_255_byte_packets),
  61. QEDE_PF_STAT(rx_256_to_511_byte_packets),
  62. QEDE_PF_STAT(rx_512_to_1023_byte_packets),
  63. QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
  64. QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
  65. QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
  66. QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
  67. QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
  68. QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
  69. QEDE_PF_STAT(tx_64_byte_packets),
  70. QEDE_PF_STAT(tx_65_to_127_byte_packets),
  71. QEDE_PF_STAT(tx_128_to_255_byte_packets),
  72. QEDE_PF_STAT(tx_256_to_511_byte_packets),
  73. QEDE_PF_STAT(tx_512_to_1023_byte_packets),
  74. QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
  75. QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
  76. QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
  77. QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
  78. QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
  79. QEDE_PF_STAT(rx_mac_crtl_frames),
  80. QEDE_PF_STAT(tx_mac_ctrl_frames),
  81. QEDE_PF_STAT(rx_pause_frames),
  82. QEDE_PF_STAT(tx_pause_frames),
  83. QEDE_PF_STAT(rx_pfc_frames),
  84. QEDE_PF_STAT(tx_pfc_frames),
  85. QEDE_PF_STAT(rx_crc_errors),
  86. QEDE_PF_STAT(rx_align_errors),
  87. QEDE_PF_STAT(rx_carrier_errors),
  88. QEDE_PF_STAT(rx_oversize_packets),
  89. QEDE_PF_STAT(rx_jabbers),
  90. QEDE_PF_STAT(rx_undersize_packets),
  91. QEDE_PF_STAT(rx_fragments),
  92. QEDE_PF_STAT(tx_lpi_entry_count),
  93. QEDE_PF_STAT(tx_total_collisions),
  94. QEDE_PF_STAT(brb_truncates),
  95. QEDE_PF_STAT(brb_discards),
  96. QEDE_STAT(no_buff_discards),
  97. QEDE_PF_STAT(mftag_filter_discards),
  98. QEDE_PF_STAT(mac_filter_discards),
  99. QEDE_STAT(tx_err_drop_pkts),
  100. QEDE_STAT(coalesced_pkts),
  101. QEDE_STAT(coalesced_events),
  102. QEDE_STAT(coalesced_aborts_num),
  103. QEDE_STAT(non_coalesced_pkts),
  104. QEDE_STAT(coalesced_bytes),
  105. };
  106. #define QEDE_STATS_DATA(dev, index) \
  107. (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
  108. + qede_stats_arr[(index)].offset)))
  109. #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
  110. enum {
  111. QEDE_PRI_FLAG_CMT,
  112. QEDE_PRI_FLAG_LEN,
  113. };
  114. static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
  115. "Coupled-Function",
  116. };
  117. enum qede_ethtool_tests {
  118. QEDE_ETHTOOL_INT_LOOPBACK,
  119. QEDE_ETHTOOL_INTERRUPT_TEST,
  120. QEDE_ETHTOOL_MEMORY_TEST,
  121. QEDE_ETHTOOL_REGISTER_TEST,
  122. QEDE_ETHTOOL_CLOCK_TEST,
  123. QEDE_ETHTOOL_TEST_MAX
  124. };
  125. static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
  126. "Internal loopback (offline)",
  127. "Interrupt (online)\t",
  128. "Memory (online)\t\t",
  129. "Register (online)\t",
  130. "Clock (online)\t\t",
  131. };
  132. static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
  133. {
  134. int i, j, k;
  135. for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
  136. if (IS_VF(edev) && qede_stats_arr[i].pf_only)
  137. continue;
  138. strcpy(buf + j * ETH_GSTRING_LEN,
  139. qede_stats_arr[i].string);
  140. j++;
  141. }
  142. for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
  143. strcpy(buf + j * ETH_GSTRING_LEN,
  144. qede_rqstats_arr[k].string);
  145. }
  146. static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  147. {
  148. struct qede_dev *edev = netdev_priv(dev);
  149. switch (stringset) {
  150. case ETH_SS_STATS:
  151. qede_get_strings_stats(edev, buf);
  152. break;
  153. case ETH_SS_PRIV_FLAGS:
  154. memcpy(buf, qede_private_arr,
  155. ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
  156. break;
  157. case ETH_SS_TEST:
  158. memcpy(buf, qede_tests_str_arr,
  159. ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
  160. break;
  161. default:
  162. DP_VERBOSE(edev, QED_MSG_DEBUG,
  163. "Unsupported stringset 0x%08x\n", stringset);
  164. }
  165. }
  166. static void qede_get_ethtool_stats(struct net_device *dev,
  167. struct ethtool_stats *stats, u64 *buf)
  168. {
  169. struct qede_dev *edev = netdev_priv(dev);
  170. int sidx, cnt = 0;
  171. int qid;
  172. qede_fill_by_demand_stats(edev);
  173. mutex_lock(&edev->qede_lock);
  174. for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
  175. if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
  176. continue;
  177. buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
  178. }
  179. for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
  180. buf[cnt] = 0;
  181. for (qid = 0; qid < edev->num_rss; qid++)
  182. buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
  183. cnt++;
  184. }
  185. mutex_unlock(&edev->qede_lock);
  186. }
  187. static int qede_get_sset_count(struct net_device *dev, int stringset)
  188. {
  189. struct qede_dev *edev = netdev_priv(dev);
  190. int num_stats = QEDE_NUM_STATS;
  191. switch (stringset) {
  192. case ETH_SS_STATS:
  193. if (IS_VF(edev)) {
  194. int i;
  195. for (i = 0; i < QEDE_NUM_STATS; i++)
  196. if (qede_stats_arr[i].pf_only)
  197. num_stats--;
  198. }
  199. return num_stats + QEDE_NUM_RQSTATS;
  200. case ETH_SS_PRIV_FLAGS:
  201. return QEDE_PRI_FLAG_LEN;
  202. case ETH_SS_TEST:
  203. if (!IS_VF(edev))
  204. return QEDE_ETHTOOL_TEST_MAX;
  205. else
  206. return 0;
  207. default:
  208. DP_VERBOSE(edev, QED_MSG_DEBUG,
  209. "Unsupported stringset 0x%08x\n", stringset);
  210. return -EINVAL;
  211. }
  212. }
  213. static u32 qede_get_priv_flags(struct net_device *dev)
  214. {
  215. struct qede_dev *edev = netdev_priv(dev);
  216. return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
  217. }
  218. static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  219. {
  220. struct qede_dev *edev = netdev_priv(dev);
  221. struct qed_link_output current_link;
  222. memset(&current_link, 0, sizeof(current_link));
  223. edev->ops->common->get_link(edev->cdev, &current_link);
  224. cmd->supported = current_link.supported_caps;
  225. cmd->advertising = current_link.advertised_caps;
  226. if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
  227. ethtool_cmd_speed_set(cmd, current_link.speed);
  228. cmd->duplex = current_link.duplex;
  229. } else {
  230. cmd->duplex = DUPLEX_UNKNOWN;
  231. ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
  232. }
  233. cmd->port = current_link.port;
  234. cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
  235. AUTONEG_DISABLE;
  236. cmd->lp_advertising = current_link.lp_caps;
  237. return 0;
  238. }
  239. static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  240. {
  241. struct qede_dev *edev = netdev_priv(dev);
  242. struct qed_link_output current_link;
  243. struct qed_link_params params;
  244. u32 speed;
  245. if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
  246. DP_INFO(edev,
  247. "Link settings are not allowed to be changed\n");
  248. return -EOPNOTSUPP;
  249. }
  250. memset(&current_link, 0, sizeof(current_link));
  251. memset(&params, 0, sizeof(params));
  252. edev->ops->common->get_link(edev->cdev, &current_link);
  253. speed = ethtool_cmd_speed(cmd);
  254. params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
  255. params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
  256. if (cmd->autoneg == AUTONEG_ENABLE) {
  257. params.autoneg = true;
  258. params.forced_speed = 0;
  259. params.adv_speeds = cmd->advertising;
  260. } else { /* forced speed */
  261. params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
  262. params.autoneg = false;
  263. params.forced_speed = speed;
  264. switch (speed) {
  265. case SPEED_10000:
  266. if (!(current_link.supported_caps &
  267. SUPPORTED_10000baseKR_Full)) {
  268. DP_INFO(edev, "10G speed not supported\n");
  269. return -EINVAL;
  270. }
  271. params.adv_speeds = SUPPORTED_10000baseKR_Full;
  272. break;
  273. case SPEED_40000:
  274. if (!(current_link.supported_caps &
  275. SUPPORTED_40000baseLR4_Full)) {
  276. DP_INFO(edev, "40G speed not supported\n");
  277. return -EINVAL;
  278. }
  279. params.adv_speeds = SUPPORTED_40000baseLR4_Full;
  280. break;
  281. default:
  282. DP_INFO(edev, "Unsupported speed %u\n", speed);
  283. return -EINVAL;
  284. }
  285. }
  286. params.link_up = true;
  287. edev->ops->common->set_link(edev->cdev, &params);
  288. return 0;
  289. }
  290. static void qede_get_drvinfo(struct net_device *ndev,
  291. struct ethtool_drvinfo *info)
  292. {
  293. char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
  294. struct qede_dev *edev = netdev_priv(ndev);
  295. strlcpy(info->driver, "qede", sizeof(info->driver));
  296. strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
  297. snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
  298. edev->dev_info.common.fw_major,
  299. edev->dev_info.common.fw_minor,
  300. edev->dev_info.common.fw_rev,
  301. edev->dev_info.common.fw_eng);
  302. snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
  303. (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
  304. (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
  305. (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
  306. edev->dev_info.common.mfw_rev & 0xFF);
  307. if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
  308. sizeof(info->fw_version)) {
  309. snprintf(info->fw_version, sizeof(info->fw_version),
  310. "mfw %s storm %s", mfw, storm);
  311. } else {
  312. snprintf(info->fw_version, sizeof(info->fw_version),
  313. "%s %s", mfw, storm);
  314. }
  315. strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
  316. }
  317. static u32 qede_get_msglevel(struct net_device *ndev)
  318. {
  319. struct qede_dev *edev = netdev_priv(ndev);
  320. return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
  321. edev->dp_module;
  322. }
  323. static void qede_set_msglevel(struct net_device *ndev, u32 level)
  324. {
  325. struct qede_dev *edev = netdev_priv(ndev);
  326. u32 dp_module = 0;
  327. u8 dp_level = 0;
  328. qede_config_debug(level, &dp_module, &dp_level);
  329. edev->dp_level = dp_level;
  330. edev->dp_module = dp_module;
  331. edev->ops->common->update_msglvl(edev->cdev,
  332. dp_module, dp_level);
  333. }
  334. static int qede_nway_reset(struct net_device *dev)
  335. {
  336. struct qede_dev *edev = netdev_priv(dev);
  337. struct qed_link_output current_link;
  338. struct qed_link_params link_params;
  339. if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
  340. DP_INFO(edev,
  341. "Link settings are not allowed to be changed\n");
  342. return -EOPNOTSUPP;
  343. }
  344. if (!netif_running(dev))
  345. return 0;
  346. memset(&current_link, 0, sizeof(current_link));
  347. edev->ops->common->get_link(edev->cdev, &current_link);
  348. if (!current_link.link_up)
  349. return 0;
  350. /* Toggle the link */
  351. memset(&link_params, 0, sizeof(link_params));
  352. link_params.link_up = false;
  353. edev->ops->common->set_link(edev->cdev, &link_params);
  354. link_params.link_up = true;
  355. edev->ops->common->set_link(edev->cdev, &link_params);
  356. return 0;
  357. }
  358. static u32 qede_get_link(struct net_device *dev)
  359. {
  360. struct qede_dev *edev = netdev_priv(dev);
  361. struct qed_link_output current_link;
  362. memset(&current_link, 0, sizeof(current_link));
  363. edev->ops->common->get_link(edev->cdev, &current_link);
  364. return current_link.link_up;
  365. }
  366. static int qede_get_coalesce(struct net_device *dev,
  367. struct ethtool_coalesce *coal)
  368. {
  369. struct qede_dev *edev = netdev_priv(dev);
  370. u16 rxc, txc;
  371. memset(coal, 0, sizeof(struct ethtool_coalesce));
  372. edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
  373. coal->rx_coalesce_usecs = rxc;
  374. coal->tx_coalesce_usecs = txc;
  375. return 0;
  376. }
  377. static int qede_set_coalesce(struct net_device *dev,
  378. struct ethtool_coalesce *coal)
  379. {
  380. struct qede_dev *edev = netdev_priv(dev);
  381. int i, rc = 0;
  382. u16 rxc, txc;
  383. u8 sb_id;
  384. if (!netif_running(dev)) {
  385. DP_INFO(edev, "Interface is down\n");
  386. return -EINVAL;
  387. }
  388. if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
  389. coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
  390. DP_INFO(edev,
  391. "Can't support requested %s coalesce value [max supported value %d]\n",
  392. coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
  393. : "tx",
  394. QED_COALESCE_MAX);
  395. return -EINVAL;
  396. }
  397. rxc = (u16)coal->rx_coalesce_usecs;
  398. txc = (u16)coal->tx_coalesce_usecs;
  399. for_each_rss(i) {
  400. sb_id = edev->fp_array[i].sb_info->igu_sb_id;
  401. rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
  402. (u8)i, sb_id);
  403. if (rc) {
  404. DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
  405. return rc;
  406. }
  407. }
  408. return rc;
  409. }
  410. static void qede_get_ringparam(struct net_device *dev,
  411. struct ethtool_ringparam *ering)
  412. {
  413. struct qede_dev *edev = netdev_priv(dev);
  414. ering->rx_max_pending = NUM_RX_BDS_MAX;
  415. ering->rx_pending = edev->q_num_rx_buffers;
  416. ering->tx_max_pending = NUM_TX_BDS_MAX;
  417. ering->tx_pending = edev->q_num_tx_buffers;
  418. }
  419. static int qede_set_ringparam(struct net_device *dev,
  420. struct ethtool_ringparam *ering)
  421. {
  422. struct qede_dev *edev = netdev_priv(dev);
  423. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  424. "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
  425. ering->rx_pending, ering->tx_pending);
  426. /* Validate legality of configuration */
  427. if (ering->rx_pending > NUM_RX_BDS_MAX ||
  428. ering->rx_pending < NUM_RX_BDS_MIN ||
  429. ering->tx_pending > NUM_TX_BDS_MAX ||
  430. ering->tx_pending < NUM_TX_BDS_MIN) {
  431. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  432. "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
  433. NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
  434. NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
  435. return -EINVAL;
  436. }
  437. /* Change ring size and re-load */
  438. edev->q_num_rx_buffers = ering->rx_pending;
  439. edev->q_num_tx_buffers = ering->tx_pending;
  440. if (netif_running(edev->ndev))
  441. qede_reload(edev, NULL, NULL);
  442. return 0;
  443. }
  444. static void qede_get_pauseparam(struct net_device *dev,
  445. struct ethtool_pauseparam *epause)
  446. {
  447. struct qede_dev *edev = netdev_priv(dev);
  448. struct qed_link_output current_link;
  449. memset(&current_link, 0, sizeof(current_link));
  450. edev->ops->common->get_link(edev->cdev, &current_link);
  451. if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
  452. epause->autoneg = true;
  453. if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
  454. epause->rx_pause = true;
  455. if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
  456. epause->tx_pause = true;
  457. DP_VERBOSE(edev, QED_MSG_DEBUG,
  458. "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
  459. epause->cmd, epause->autoneg, epause->rx_pause,
  460. epause->tx_pause);
  461. }
  462. static int qede_set_pauseparam(struct net_device *dev,
  463. struct ethtool_pauseparam *epause)
  464. {
  465. struct qede_dev *edev = netdev_priv(dev);
  466. struct qed_link_params params;
  467. struct qed_link_output current_link;
  468. if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
  469. DP_INFO(edev,
  470. "Pause settings are not allowed to be changed\n");
  471. return -EOPNOTSUPP;
  472. }
  473. memset(&current_link, 0, sizeof(current_link));
  474. edev->ops->common->get_link(edev->cdev, &current_link);
  475. memset(&params, 0, sizeof(params));
  476. params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
  477. if (epause->autoneg) {
  478. if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
  479. DP_INFO(edev, "autoneg not supported\n");
  480. return -EINVAL;
  481. }
  482. params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
  483. }
  484. if (epause->rx_pause)
  485. params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
  486. if (epause->tx_pause)
  487. params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
  488. params.link_up = true;
  489. edev->ops->common->set_link(edev->cdev, &params);
  490. return 0;
  491. }
  492. static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
  493. {
  494. edev->ndev->mtu = args->mtu;
  495. }
  496. /* Netdevice NDOs */
  497. #define ETH_MAX_JUMBO_PACKET_SIZE 9600
  498. #define ETH_MIN_PACKET_SIZE 60
  499. int qede_change_mtu(struct net_device *ndev, int new_mtu)
  500. {
  501. struct qede_dev *edev = netdev_priv(ndev);
  502. union qede_reload_args args;
  503. if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
  504. ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
  505. DP_ERR(edev, "Can't support requested MTU size\n");
  506. return -EINVAL;
  507. }
  508. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  509. "Configuring MTU size of %d\n", new_mtu);
  510. /* Set the mtu field and re-start the interface if needed*/
  511. args.mtu = new_mtu;
  512. if (netif_running(edev->ndev))
  513. qede_reload(edev, &qede_update_mtu, &args);
  514. qede_update_mtu(edev, &args);
  515. return 0;
  516. }
  517. static void qede_get_channels(struct net_device *dev,
  518. struct ethtool_channels *channels)
  519. {
  520. struct qede_dev *edev = netdev_priv(dev);
  521. channels->max_combined = QEDE_MAX_RSS_CNT(edev);
  522. channels->combined_count = QEDE_RSS_CNT(edev);
  523. }
  524. static int qede_set_channels(struct net_device *dev,
  525. struct ethtool_channels *channels)
  526. {
  527. struct qede_dev *edev = netdev_priv(dev);
  528. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  529. "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
  530. channels->rx_count, channels->tx_count,
  531. channels->other_count, channels->combined_count);
  532. /* We don't support separate rx / tx, nor `other' channels. */
  533. if (channels->rx_count || channels->tx_count ||
  534. channels->other_count || (channels->combined_count == 0) ||
  535. (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
  536. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  537. "command parameters not supported\n");
  538. return -EINVAL;
  539. }
  540. /* Check if there was a change in the active parameters */
  541. if (channels->combined_count == QEDE_RSS_CNT(edev)) {
  542. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  543. "No change in active parameters\n");
  544. return 0;
  545. }
  546. /* We need the number of queues to be divisible between the hwfns */
  547. if (channels->combined_count % edev->dev_info.common.num_hwfns) {
  548. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  549. "Number of channels must be divisable by %04x\n",
  550. edev->dev_info.common.num_hwfns);
  551. return -EINVAL;
  552. }
  553. /* Set number of queues and reload if necessary */
  554. edev->req_rss = channels->combined_count;
  555. if (netif_running(dev))
  556. qede_reload(edev, NULL, NULL);
  557. return 0;
  558. }
  559. static int qede_set_phys_id(struct net_device *dev,
  560. enum ethtool_phys_id_state state)
  561. {
  562. struct qede_dev *edev = netdev_priv(dev);
  563. u8 led_state = 0;
  564. switch (state) {
  565. case ETHTOOL_ID_ACTIVE:
  566. return 1; /* cycle on/off once per second */
  567. case ETHTOOL_ID_ON:
  568. led_state = QED_LED_MODE_ON;
  569. break;
  570. case ETHTOOL_ID_OFF:
  571. led_state = QED_LED_MODE_OFF;
  572. break;
  573. case ETHTOOL_ID_INACTIVE:
  574. led_state = QED_LED_MODE_RESTORE;
  575. break;
  576. }
  577. edev->ops->common->set_led(edev->cdev, led_state);
  578. return 0;
  579. }
  580. static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
  581. {
  582. info->data = RXH_IP_SRC | RXH_IP_DST;
  583. switch (info->flow_type) {
  584. case TCP_V4_FLOW:
  585. case TCP_V6_FLOW:
  586. info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  587. break;
  588. case UDP_V4_FLOW:
  589. if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
  590. info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  591. break;
  592. case UDP_V6_FLOW:
  593. if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
  594. info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
  595. break;
  596. case IPV4_FLOW:
  597. case IPV6_FLOW:
  598. break;
  599. default:
  600. info->data = 0;
  601. break;
  602. }
  603. return 0;
  604. }
  605. static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
  606. u32 *rules __always_unused)
  607. {
  608. struct qede_dev *edev = netdev_priv(dev);
  609. switch (info->cmd) {
  610. case ETHTOOL_GRXRINGS:
  611. info->data = edev->num_rss;
  612. return 0;
  613. case ETHTOOL_GRXFH:
  614. return qede_get_rss_flags(edev, info);
  615. default:
  616. DP_ERR(edev, "Command parameters not supported\n");
  617. return -EOPNOTSUPP;
  618. }
  619. }
/* Configure which header fields feed the RSS hash for a given flow type
 * (ETHTOOL_SRXFH).  Only UDPv4/UDPv6 are actually configurable (2-tuple
 * vs 4-tuple); TCP is fixed at 4-tuple and plain IP at 2-tuple, so for
 * those the request is merely validated.  On a real change the vport is
 * re-configured when the interface is running.
 *
 * Returns 0 on success/no-op, -EINVAL for unsupported combinations, or
 * the vport_update() result.
 */
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params vport_update_params;
	/* Capability bits to set / clear in rss_caps for this request */
	u8 set_caps = 0, clr_caps = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		/* XOR is non-zero iff data differs from the exact 4-tuple */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	default:
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
					   ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
				    set_caps;
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	if (netif_running(edev->ndev)) {
		memset(&vport_update_params, 0, sizeof(vport_update_params));
		vport_update_params.update_rss_flg = 1;
		vport_update_params.vport_id = 0;
		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
		return edev->ops->vport_update(edev->cdev,
					       &vport_update_params);
	}

	return 0;
}
  714. static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
  715. {
  716. struct qede_dev *edev = netdev_priv(dev);
  717. switch (info->cmd) {
  718. case ETHTOOL_SRXFH:
  719. return qede_set_rss_flags(edev, info);
  720. default:
  721. DP_INFO(edev, "Command parameters not supported\n");
  722. return -EOPNOTSUPP;
  723. }
  724. }
/* Size of the RSS indirection table, in entries. */
static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}
/* Size of the RSS hash key, in bytes. */
static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_params.rss_key);
}
  734. static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
  735. {
  736. struct qede_dev *edev = netdev_priv(dev);
  737. int i;
  738. if (hfunc)
  739. *hfunc = ETH_RSS_HASH_TOP;
  740. if (!indir)
  741. return 0;
  742. for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
  743. indir[i] = edev->rss_params.rss_ind_table[i];
  744. if (key)
  745. memcpy(key, edev->rss_params.rss_key,
  746. qede_get_rxfh_key_size(dev));
  747. return 0;
  748. }
  749. static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
  750. const u8 *key, const u8 hfunc)
  751. {
  752. struct qed_update_vport_params vport_update_params;
  753. struct qede_dev *edev = netdev_priv(dev);
  754. int i;
  755. if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
  756. return -EOPNOTSUPP;
  757. if (!indir && !key)
  758. return 0;
  759. if (indir) {
  760. for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
  761. edev->rss_params.rss_ind_table[i] = indir[i];
  762. edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
  763. }
  764. if (key) {
  765. memcpy(&edev->rss_params.rss_key, key,
  766. qede_get_rxfh_key_size(dev));
  767. edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
  768. }
  769. if (netif_running(edev->ndev)) {
  770. memset(&vport_update_params, 0, sizeof(vport_update_params));
  771. vport_update_params.update_rss_flg = 1;
  772. vport_update_params.vport_id = 0;
  773. memcpy(&vport_update_params.rss_params, &edev->rss_params,
  774. sizeof(vport_update_params.rss_params));
  775. return edev->ops->vport_update(edev->cdev,
  776. &vport_update_params);
  777. }
  778. return 0;
  779. }
  780. /* This function enables the interrupt generation and the NAPI on the device */
  781. static void qede_netif_start(struct qede_dev *edev)
  782. {
  783. int i;
  784. if (!netif_running(edev->ndev))
  785. return;
  786. for_each_rss(i) {
  787. /* Update and reenable interrupts */
  788. qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
  789. napi_enable(&edev->fp_array[i].napi);
  790. }
  791. }
  792. /* This function disables the NAPI and the interrupt generation on the device */
  793. static void qede_netif_stop(struct qede_dev *edev)
  794. {
  795. int i;
  796. for_each_rss(i) {
  797. napi_disable(&edev->fp_array[i].napi);
  798. /* Disable interrupts */
  799. qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
  800. }
  801. }
  802. static int qede_selftest_transmit_traffic(struct qede_dev *edev,
  803. struct sk_buff *skb)
  804. {
  805. struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
  806. struct eth_tx_1st_bd *first_bd;
  807. dma_addr_t mapping;
  808. int i, idx, val;
  809. /* Fill the entry in the SW ring and the BDs in the FW ring */
  810. idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  811. txq->sw_tx_ring[idx].skb = skb;
  812. first_bd = qed_chain_produce(&txq->tx_pbl);
  813. memset(first_bd, 0, sizeof(*first_bd));
  814. val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
  815. first_bd->data.bd_flags.bitfields = val;
  816. val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
  817. first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
  818. /* Map skb linear data for DMA and set in the first BD */
  819. mapping = dma_map_single(&edev->pdev->dev, skb->data,
  820. skb_headlen(skb), DMA_TO_DEVICE);
  821. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  822. DP_NOTICE(edev, "SKB mapping failed\n");
  823. return -ENOMEM;
  824. }
  825. BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
  826. /* update the first BD with the actual num BDs */
  827. first_bd->data.nbds = 1;
  828. txq->sw_tx_prod++;
  829. /* 'next page' entries are counted in the producer value */
  830. val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
  831. txq->tx_db.data.bd_prod = val;
  832. /* wmb makes sure that the BDs data is updated before updating the
  833. * producer, otherwise FW may read old data from the BDs.
  834. */
  835. wmb();
  836. barrier();
  837. writel(txq->tx_db.raw, txq->doorbell_addr);
  838. /* mmiowb is needed to synchronize doorbell writes from more than one
  839. * processor. It guarantees that the write arrives to the device before
  840. * the queue lock is released and another start_xmit is called (possibly
  841. * on another CPU). Without this barrier, the next doorbell can bypass
  842. * this doorbell. This is applicable to IA64/Altix systems.
  843. */
  844. mmiowb();
  845. for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
  846. if (qede_txq_has_work(txq))
  847. break;
  848. usleep_range(100, 200);
  849. }
  850. if (!qede_txq_has_work(txq)) {
  851. DP_NOTICE(edev, "Tx completion didn't happen\n");
  852. return -1;
  853. }
  854. first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
  855. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  856. BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
  857. txq->sw_tx_cons++;
  858. txq->sw_tx_ring[idx].skb = NULL;
  859. return 0;
  860. }
/* Poll Rx queue 0 for the looped-back self-test frame and verify its
 * payload pattern (byte i == i & 0xff, written by the transmit side).
 *
 * Returns 0 when the expected frame arrives intact, -1 when no frame
 * shows up within QEDE_SELFTEST_POLL_COUNT polls or the payload check
 * fails.  NAPI/interrupts are stopped during the self-test, so completion
 * is detected by polling qede_has_rx_work().
 *
 * NOTE(review): only a single CQE/BD is consumed and recycled here; this
 * assumes the loopback frame arrives as exactly one regular fast-path CQE
 * on queue 0 - confirm against the FW behavior for this path.
 */
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	u8 *data_ptr;
	int i;

	/* The packet is expected to receive on rx-queue 0 even though RSS is
	 * enabled. This is because the queue 0 is configured as the default
	 * queue and that the loopback traffic is not IP.
	 */
	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_has_rx_work(rxq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_has_rx_work(rxq)) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD before reading hw_comp_cons. If the CQE is read before it is
	 * written by FW, then FW writes CQE and SB, and then the CPU reads the
	 * hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

	/* Get the data from the SW ring; the page holding the frame is
	 * located via the SW consumer index, offset by where FW placed it.
	 */
	sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	data_ptr = (u8 *)(page_address(sw_rx_data->data) +
		     fp_cqe->placement_offset + sw_rx_data->page_offset);

	/* Verify the payload pattern beyond the Ethernet header */
	for (i = ETH_HLEN; i < len; i++)
		if (data_ptr[i] != (unsigned char)(i & 0xff)) {
			DP_NOTICE(edev, "Loopback test failed\n");
			qede_recycle_rx_bd_ring(rxq, edev, 1);
			return -1;
		}

	/* Return the consumed buffer to the Rx ring */
	qede_recycle_rx_bd_ring(rxq, edev, 1);

	return 0;
}
/* Run one loopback self-test iteration in the given loopback mode
 * (e.g. QED_LINK_LOOPBACK_INT_PHY).
 *
 * Sequence: quiesce NAPI/interrupts, force the link up in loopback mode,
 * transmit a patterned MTU-sized frame addressed to ourselves, verify it
 * is received intact, then restore normal link mode and restart NAPI.
 *
 * Returns 0 on success; -EINVAL if the interface is down, -ENOMEM on
 * allocation failure, or the transmit/receive helper's error code.
 */
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* prepare the loopback packet: dst == src == our MAC, a dummy
	 * ethertype region of 0x77, and payload byte i == i & 0xff (this
	 * pattern is what qede_selftest_receive_traffic() checks).
	 */
	pkt_size = edev->ndev->mtu + ETH_HLEN;

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	/* The skb is not consumed by the Tx completion in self-test mode,
	 * so it is freed here on both success and failure paths.
	 */
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}
  963. static void qede_self_test(struct net_device *dev,
  964. struct ethtool_test *etest, u64 *buf)
  965. {
  966. struct qede_dev *edev = netdev_priv(dev);
  967. DP_VERBOSE(edev, QED_MSG_DEBUG,
  968. "Self-test command parameters: offline = %d, external_lb = %d\n",
  969. (etest->flags & ETH_TEST_FL_OFFLINE),
  970. (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
  971. memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
  972. if (etest->flags & ETH_TEST_FL_OFFLINE) {
  973. if (qede_selftest_run_loopback(edev,
  974. QED_LINK_LOOPBACK_INT_PHY)) {
  975. buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
  976. etest->flags |= ETH_TEST_FL_FAILED;
  977. }
  978. }
  979. if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
  980. buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
  981. etest->flags |= ETH_TEST_FL_FAILED;
  982. }
  983. if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
  984. buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
  985. etest->flags |= ETH_TEST_FL_FAILED;
  986. }
  987. if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
  988. buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
  989. etest->flags |= ETH_TEST_FL_FAILED;
  990. }
  991. if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
  992. buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
  993. etest->flags |= ETH_TEST_FL_FAILED;
  994. }
  995. }
  996. static int qede_set_tunable(struct net_device *dev,
  997. const struct ethtool_tunable *tuna,
  998. const void *data)
  999. {
  1000. struct qede_dev *edev = netdev_priv(dev);
  1001. u32 val;
  1002. switch (tuna->id) {
  1003. case ETHTOOL_RX_COPYBREAK:
  1004. val = *(u32 *)data;
  1005. if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
  1006. DP_VERBOSE(edev, QED_MSG_DEBUG,
  1007. "Invalid rx copy break value, range is [%u, %u]",
  1008. QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
  1009. return -EINVAL;
  1010. }
  1011. edev->rx_copybreak = *(u32 *)data;
  1012. break;
  1013. default:
  1014. return -EOPNOTSUPP;
  1015. }
  1016. return 0;
  1017. }
  1018. static int qede_get_tunable(struct net_device *dev,
  1019. const struct ethtool_tunable *tuna, void *data)
  1020. {
  1021. struct qede_dev *edev = netdev_priv(dev);
  1022. switch (tuna->id) {
  1023. case ETHTOOL_RX_COPYBREAK:
  1024. *(u32 *)data = edev->rx_copybreak;
  1025. break;
  1026. default:
  1027. return -EOPNOTSUPP;
  1028. }
  1029. return 0;
  1030. }
/* ethtool callbacks for PF devices: full set including link/coalesce
 * control, LED blinking (set_phys_id) and the offline self-test.
 */
static const struct ethtool_ops qede_ethtool_ops = {
	.get_settings = qede_get_settings,
	.set_settings = qede_set_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.nway_reset = qede_nway_reset,
	.get_link = qede_get_link,
	.get_coalesce = qede_get_coalesce,
	.set_coalesce = qede_set_coalesce,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_pauseparam = qede_get_pauseparam,
	.set_pauseparam = qede_set_pauseparam,
	.get_strings = qede_get_strings,
	.set_phys_id = qede_set_phys_id,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.self_test = qede_self_test,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};
/* ethtool callbacks for VF devices: a reduced set of qede_ethtool_ops -
 * no link/settings writes, pause control, LED, coalescing or self-test,
 * since those operations are PF-privileged.
 */
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.get_settings = qede_get_settings,
	.get_drvinfo = qede_get_drvinfo,
	.get_msglevel = qede_get_msglevel,
	.set_msglevel = qede_set_msglevel,
	.get_link = qede_get_link,
	.get_ringparam = qede_get_ringparam,
	.set_ringparam = qede_set_ringparam,
	.get_strings = qede_get_strings,
	.get_ethtool_stats = qede_get_ethtool_stats,
	.get_priv_flags = qede_get_priv_flags,
	.get_sset_count = qede_get_sset_count,
	.get_rxnfc = qede_get_rxnfc,
	.set_rxnfc = qede_set_rxnfc,
	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
	.get_rxfh_key_size = qede_get_rxfh_key_size,
	.get_rxfh = qede_get_rxfh,
	.set_rxfh = qede_set_rxfh,
	.get_channels = qede_get_channels,
	.set_channels = qede_set_channels,
	.get_tunable = qede_get_tunable,
	.set_tunable = qede_set_tunable,
};
  1085. void qede_set_ethtool_ops(struct net_device *dev)
  1086. {
  1087. struct qede_dev *edev = netdev_priv(dev);
  1088. if (IS_VF(edev))
  1089. dev->ethtool_ops = &qede_vf_ethtool_ops;
  1090. else
  1091. dev->ethtool_ops = &qede_ethtool_ops;
  1092. }