/* ice_common.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_common.h"
  4. #include "ice_sched.h"
  5. #include "ice_adminq_cmd.h"
  6. #define ICE_PF_RESET_WAIT_COUNT 200
  7. /**
  8. * ice_set_mac_type - Sets MAC type
  9. * @hw: pointer to the HW structure
  10. *
  11. * This function sets the MAC type of the adapter based on the
  12. * vendor ID and device ID stored in the hw structure.
  13. */
  14. static enum ice_status ice_set_mac_type(struct ice_hw *hw)
  15. {
  16. if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
  17. return ICE_ERR_DEVICE_NOT_SUPPORTED;
  18. hw->mac_type = ICE_MAC_GENERIC;
  19. return 0;
  20. }
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Issues the Clear PF Configuration admin queue command as a direct
 * (bufferless) command and returns the AQ send status.
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
  31. /**
  32. * ice_aq_manage_mac_read - manage MAC address read command
  33. * @hw: pointer to the hw struct
  34. * @buf: a virtual buffer to hold the manage MAC read response
  35. * @buf_size: Size of the virtual buffer
  36. * @cd: pointer to command details structure or NULL
  37. *
  38. * This function is used to return per PF station MAC address (0x0107).
  39. * NOTE: Upon successful completion of this command, MAC address information
  40. * is returned in user specified buffer. Please interpret user specified
  41. * buffer as "manage_mac_read" response.
  42. * Response such as various MAC addresses are stored in HW struct (port.mac)
  43. * ice_aq_discover_caps is expected to be called before this function is called.
  44. */
  45. static enum ice_status
  46. ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
  47. struct ice_sq_cd *cd)
  48. {
  49. struct ice_aqc_manage_mac_read_resp *resp;
  50. struct ice_aqc_manage_mac_read *cmd;
  51. struct ice_aq_desc desc;
  52. enum ice_status status;
  53. u16 flags;
  54. cmd = &desc.params.mac_read;
  55. if (buf_size < sizeof(*resp))
  56. return ICE_ERR_BUF_TOO_SHORT;
  57. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
  58. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  59. if (status)
  60. return status;
  61. resp = (struct ice_aqc_manage_mac_read_resp *)buf;
  62. flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
  63. if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
  64. ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
  65. return ICE_ERR_CFG;
  66. }
  67. ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
  68. ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
  69. return 0;
  70. }
  71. /**
  72. * ice_aq_get_phy_caps - returns PHY capabilities
  73. * @pi: port information structure
  74. * @qual_mods: report qualified modules
  75. * @report_mode: report mode capabilities
  76. * @pcaps: structure for PHY capabilities to be filled
  77. * @cd: pointer to command details structure or NULL
  78. *
  79. * Returns the various PHY capabilities supported on the Port (0x0600)
  80. */
  81. static enum ice_status
  82. ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
  83. struct ice_aqc_get_phy_caps_data *pcaps,
  84. struct ice_sq_cd *cd)
  85. {
  86. struct ice_aqc_get_phy_caps *cmd;
  87. u16 pcaps_size = sizeof(*pcaps);
  88. struct ice_aq_desc desc;
  89. enum ice_status status;
  90. cmd = &desc.params.get_phy;
  91. if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
  92. return ICE_ERR_PARAM;
  93. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
  94. if (qual_mods)
  95. cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
  96. cmd->param0 |= cpu_to_le16(report_mode);
  97. status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
  98. if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
  99. pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
  100. return status;
  101. }
  102. /**
  103. * ice_get_media_type - Gets media type
  104. * @pi: port information structure
  105. */
  106. static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
  107. {
  108. struct ice_link_status *hw_link_info;
  109. if (!pi)
  110. return ICE_MEDIA_UNKNOWN;
  111. hw_link_info = &pi->phy.link_info;
  112. if (hw_link_info->phy_type_low) {
  113. switch (hw_link_info->phy_type_low) {
  114. case ICE_PHY_TYPE_LOW_1000BASE_SX:
  115. case ICE_PHY_TYPE_LOW_1000BASE_LX:
  116. case ICE_PHY_TYPE_LOW_10GBASE_SR:
  117. case ICE_PHY_TYPE_LOW_10GBASE_LR:
  118. case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
  119. case ICE_PHY_TYPE_LOW_25GBASE_SR:
  120. case ICE_PHY_TYPE_LOW_25GBASE_LR:
  121. case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
  122. case ICE_PHY_TYPE_LOW_40GBASE_SR4:
  123. case ICE_PHY_TYPE_LOW_40GBASE_LR4:
  124. return ICE_MEDIA_FIBER;
  125. case ICE_PHY_TYPE_LOW_100BASE_TX:
  126. case ICE_PHY_TYPE_LOW_1000BASE_T:
  127. case ICE_PHY_TYPE_LOW_2500BASE_T:
  128. case ICE_PHY_TYPE_LOW_5GBASE_T:
  129. case ICE_PHY_TYPE_LOW_10GBASE_T:
  130. case ICE_PHY_TYPE_LOW_25GBASE_T:
  131. return ICE_MEDIA_BASET;
  132. case ICE_PHY_TYPE_LOW_10G_SFI_DA:
  133. case ICE_PHY_TYPE_LOW_25GBASE_CR:
  134. case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
  135. case ICE_PHY_TYPE_LOW_25GBASE_CR1:
  136. case ICE_PHY_TYPE_LOW_40GBASE_CR4:
  137. return ICE_MEDIA_DA;
  138. case ICE_PHY_TYPE_LOW_1000BASE_KX:
  139. case ICE_PHY_TYPE_LOW_2500BASE_KX:
  140. case ICE_PHY_TYPE_LOW_2500BASE_X:
  141. case ICE_PHY_TYPE_LOW_5GBASE_KR:
  142. case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
  143. case ICE_PHY_TYPE_LOW_25GBASE_KR:
  144. case ICE_PHY_TYPE_LOW_25GBASE_KR1:
  145. case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
  146. case ICE_PHY_TYPE_LOW_40GBASE_KR4:
  147. return ICE_MEDIA_BACKPLANE;
  148. }
  149. }
  150. return ICE_MEDIA_UNKNOWN;
  151. }
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 * On success the previous link state is preserved in link_info_old,
 * the current state (speed, PHY type, media, flow control, frame size)
 * is refreshed in pi->phy / pi->fc, and pi->phy.get_link_info is cleared.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;

	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	/* ask FW to enable or disable Link Status Events per @ena_lse */
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);
	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	/* media type is derived from the just-updated phy_type_low */
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	/* FW echoes back whether LSE ended up enabled in the response flags */
	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
  220. /**
  221. * ice_init_hw - main hardware initialization routine
  222. * @hw: pointer to the hardware structure
  223. */
  224. enum ice_status ice_init_hw(struct ice_hw *hw)
  225. {
  226. struct ice_aqc_get_phy_caps_data *pcaps;
  227. enum ice_status status;
  228. u16 mac_buf_len;
  229. void *mac_buf;
  230. /* Set MAC type based on DeviceID */
  231. status = ice_set_mac_type(hw);
  232. if (status)
  233. return status;
  234. hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
  235. PF_FUNC_RID_FUNC_NUM_M) >>
  236. PF_FUNC_RID_FUNC_NUM_S;
  237. status = ice_reset(hw, ICE_RESET_PFR);
  238. if (status)
  239. return status;
  240. /* set these values to minimum allowed */
  241. hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
  242. hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
  243. hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
  244. hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
  245. status = ice_init_all_ctrlq(hw);
  246. if (status)
  247. goto err_unroll_cqinit;
  248. status = ice_clear_pf_cfg(hw);
  249. if (status)
  250. goto err_unroll_cqinit;
  251. ice_clear_pxe_mode(hw);
  252. status = ice_init_nvm(hw);
  253. if (status)
  254. goto err_unroll_cqinit;
  255. status = ice_get_caps(hw);
  256. if (status)
  257. goto err_unroll_cqinit;
  258. hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
  259. sizeof(*hw->port_info), GFP_KERNEL);
  260. if (!hw->port_info) {
  261. status = ICE_ERR_NO_MEMORY;
  262. goto err_unroll_cqinit;
  263. }
  264. /* set the back pointer to hw */
  265. hw->port_info->hw = hw;
  266. /* Initialize port_info struct with switch configuration data */
  267. status = ice_get_initial_sw_cfg(hw);
  268. if (status)
  269. goto err_unroll_alloc;
  270. /* Query the allocated resources for tx scheduler */
  271. status = ice_sched_query_res_alloc(hw);
  272. if (status) {
  273. ice_debug(hw, ICE_DBG_SCHED,
  274. "Failed to get scheduler allocated resources\n");
  275. goto err_unroll_alloc;
  276. }
  277. /* Initialize port_info struct with scheduler data */
  278. status = ice_sched_init_port(hw->port_info);
  279. if (status)
  280. goto err_unroll_sched;
  281. pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
  282. if (!pcaps) {
  283. status = ICE_ERR_NO_MEMORY;
  284. goto err_unroll_sched;
  285. }
  286. /* Initialize port_info struct with PHY capabilities */
  287. status = ice_aq_get_phy_caps(hw->port_info, false,
  288. ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
  289. devm_kfree(ice_hw_to_dev(hw), pcaps);
  290. if (status)
  291. goto err_unroll_sched;
  292. /* Initialize port_info struct with link information */
  293. status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
  294. if (status)
  295. goto err_unroll_sched;
  296. /* Get port MAC information */
  297. mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
  298. mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
  299. if (!mac_buf)
  300. goto err_unroll_sched;
  301. status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
  302. devm_kfree(ice_hw_to_dev(hw), mac_buf);
  303. if (status)
  304. goto err_unroll_sched;
  305. return 0;
  306. err_unroll_sched:
  307. ice_sched_cleanup_all(hw);
  308. err_unroll_alloc:
  309. devm_kfree(ice_hw_to_dev(hw), hw->port_info);
  310. err_unroll_cqinit:
  311. ice_shutdown_all_ctrlq(hw);
  312. return status;
  313. }
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Tear down in the same order as ice_init_hw's error path: release
 * scheduler resources, shut down all control queues, then free the
 * port_info allocation (guarded: deinit may run after a failed init).
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		/* NULL the pointer so a second deinit is a safe no-op */
		hw->port_info = NULL;
	}
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Two-phase poll: first wait for the device to leave the reset state
 * (GLGEN_RSTAT), then wait for the CORER/GLOBR "done" bits in GLNVM_ULD.
 * Returns ICE_ERR_RESET_FAILED if either phase times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

/* NOTE: this mask is also used by ice_pf_reset() below */
#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
			     GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
  371. /**
  372. * ice_pf_reset - Reset the PF
  373. * @hw: pointer to the hardware structure
  374. *
  375. * If a global reset has been triggered, this function checks
  376. * for its completion and then issues the PF reset
  377. */
  378. static enum ice_status ice_pf_reset(struct ice_hw *hw)
  379. {
  380. u32 cnt, reg;
  381. /* If at function entry a global reset was already in progress, i.e.
  382. * state is not 'device active' or any of the reset done bits are not
  383. * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
  384. * global reset is done.
  385. */
  386. if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
  387. (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
  388. /* poll on global reset currently in progress until done */
  389. if (ice_check_reset(hw))
  390. return ICE_ERR_RESET_FAILED;
  391. return 0;
  392. }
  393. /* Reset the PF */
  394. reg = rd32(hw, PFGEN_CTRL);
  395. wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
  396. for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
  397. reg = rd32(hw, PFGEN_CTRL);
  398. if (!(reg & PFGEN_CTRL_PFSWR_M))
  399. break;
  400. mdelay(1);
  401. }
  402. if (cnt == ICE_PF_RESET_WAIT_COUNT) {
  403. ice_debug(hw, ICE_DBG_INIT,
  404. "PF reset polling failed to complete.\n");
  405. return ICE_ERR_RESET_FAILED;
  406. }
  407. return 0;
  408. }
  409. /**
  410. * ice_reset - Perform different types of reset
  411. * @hw: pointer to the hardware structure
  412. * @req: reset request
  413. *
  414. * This function triggers a reset as specified by the req parameter.
  415. *
  416. * Note:
  417. * If anything other than a PF reset is triggered, PXE mode is restored.
  418. * This has to be cleared using ice_clear_pxe_mode again, once the AQ
  419. * interface has been restored in the rebuild flow.
  420. */
  421. enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  422. {
  423. u32 val = 0;
  424. switch (req) {
  425. case ICE_RESET_PFR:
  426. return ice_pf_reset(hw);
  427. case ICE_RESET_CORER:
  428. ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
  429. val = GLGEN_RTRIG_CORER_M;
  430. break;
  431. case ICE_RESET_GLOBR:
  432. ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
  433. val = GLGEN_RTRIG_GLOBR_M;
  434. break;
  435. }
  436. val |= rd32(hw, GLGEN_RTRIG);
  437. wr32(hw, GLGEN_RTRIG, val);
  438. ice_flush(hw);
  439. /* wait for the FW to be ready */
  440. return ice_check_reset(hw);
  441. }
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	/* Without dynamic debug, filter on the driver's debug mask here;
	 * with dynamic debug enabled, ice_debug does its own filtering.
	 */
#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		/* clamp the dump to the caller-provided buffer length */
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 * Thin wrapper targeting hw->adminq; ice_sq_send_cmd does the real work.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
  502. /**
  503. * ice_aq_get_fw_ver
  504. * @hw: pointer to the hw struct
  505. * @cd: pointer to command details structure or NULL
  506. *
  507. * Get the firmware version (0x0001) from the admin queue commands
  508. */
  509. enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
  510. {
  511. struct ice_aqc_get_ver *resp;
  512. struct ice_aq_desc desc;
  513. enum ice_status status;
  514. resp = &desc.params.get_ver;
  515. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
  516. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  517. if (!status) {
  518. hw->fw_branch = resp->fw_branch;
  519. hw->fw_maj_ver = resp->fw_major;
  520. hw->fw_min_ver = resp->fw_minor;
  521. hw->fw_patch = resp->fw_patch;
  522. hw->fw_build = le32_to_cpu(resp->fw_build);
  523. hw->api_branch = resp->api_branch;
  524. hw->api_maj_ver = resp->api_major;
  525. hw->api_min_ver = resp->api_minor;
  526. hw->api_patch = resp->api_patch;
  527. }
  528. return status;
  529. }
  530. /**
  531. * ice_aq_q_shutdown
  532. * @hw: pointer to the hw struct
  533. * @unloading: is the driver unloading itself
  534. *
  535. * Tell the Firmware that we're shutting down the AdminQ and whether
  536. * or not the driver is unloading as well (0x0003).
  537. */
  538. enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
  539. {
  540. struct ice_aqc_q_shutdown *cmd;
  541. struct ice_aq_desc desc;
  542. cmd = &desc.params.q_shutdown;
  543. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
  544. if (unloading)
  545. cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
  546. return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  547. }
  548. /**
  549. * ice_aq_req_res
  550. * @hw: pointer to the hw struct
  551. * @res: resource id
  552. * @access: access type
  553. * @sdp_number: resource number
  554. * @timeout: the maximum time in ms that the driver may hold the resource
  555. * @cd: pointer to command details structure or NULL
  556. *
  557. * requests common resource using the admin queue commands (0x0008)
  558. */
  559. static enum ice_status
  560. ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
  561. enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
  562. struct ice_sq_cd *cd)
  563. {
  564. struct ice_aqc_req_res *cmd_resp;
  565. struct ice_aq_desc desc;
  566. enum ice_status status;
  567. cmd_resp = &desc.params.res_owner;
  568. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
  569. cmd_resp->res_id = cpu_to_le16(res);
  570. cmd_resp->access_type = cpu_to_le16(access);
  571. cmd_resp->res_number = cpu_to_le32(sdp_number);
  572. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  573. /* The completion specifies the maximum time in ms that the driver
  574. * may hold the resource in the Timeout field.
  575. * If the resource is held by someone else, the command completes with
  576. * busy return value and the timeout field indicates the maximum time
  577. * the current owner of the resource has to free it.
  578. */
  579. if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
  580. *timeout = le32_to_cpu(cmd_resp->timeout);
  581. return status;
  582. }
  583. /**
  584. * ice_aq_release_res
  585. * @hw: pointer to the hw struct
  586. * @res: resource id
  587. * @sdp_number: resource number
  588. * @cd: pointer to command details structure or NULL
  589. *
  590. * release common resource using the admin queue commands (0x0009)
  591. */
  592. static enum ice_status
  593. ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
  594. struct ice_sq_cd *cd)
  595. {
  596. struct ice_aqc_req_res *cmd;
  597. struct ice_aq_desc desc;
  598. cmd = &desc.params.res_owner;
  599. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
  600. cmd->res_id = cpu_to_le16(res);
  601. cmd->res_number = cpu_to_le32(sdp_number);
  602. return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  603. }
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 * If the resource is busy, it polls (in ICE_RES_POLLING_DELAY_MS steps,
 * bounded by the timeout the current owner reported) until the lock is
 * obtained. Returns ICE_ERR_AQ_NO_WORK when another driver already did
 * the work the lock protects.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
  662. /**
  663. * ice_release_res
  664. * @hw: pointer to the HW structure
  665. * @res: resource id
  666. *
  667. * This function will release a resource using the proper Admin Command.
  668. */
  669. void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
  670. {
  671. enum ice_status status;
  672. u32 total_delay = 0;
  673. status = ice_aq_release_res(hw, res, 0, NULL);
  674. /* there are some rare cases when trying to release the resource
  675. * results in an admin Q timeout, so handle them correctly
  676. */
  677. while ((status == ICE_ERR_AQ_TIMEOUT) &&
  678. (total_delay < hw->adminq.sq_cmd_timeout)) {
  679. mdelay(1);
  680. status = ice_aq_release_res(hw, res, 0, NULL);
  681. total_delay++;
  682. }
  683. }
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 * Each record is decoded into either hw->dev_caps or hw->func_caps (and
 * their shared common_cap section) depending on the opcode.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	/* select the device- or function-level destination by opcode */
	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			/* VSI count lands in dev or func caps, not common */
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}
  791. /**
  792. * ice_aq_discover_caps - query function/device capabilities
  793. * @hw: pointer to the hw struct
  794. * @buf: a virtual buffer to hold the capabilities
  795. * @buf_size: Size of the virtual buffer
  796. * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
  797. * @opc: capabilities type to discover - pass in the command opcode
  798. * @cd: pointer to command details structure or NULL
  799. *
  800. * Get the function(0x000a)/device(0x000b) capabilities description from
  801. * the firmware.
  802. */
  803. static enum ice_status
  804. ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
  805. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  806. {
  807. struct ice_aqc_list_caps *cmd;
  808. struct ice_aq_desc desc;
  809. enum ice_status status;
  810. cmd = &desc.params.get_cap;
  811. if (opc != ice_aqc_opc_list_func_caps &&
  812. opc != ice_aqc_opc_list_dev_caps)
  813. return ICE_ERR_PARAM;
  814. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  815. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  816. if (!status)
  817. ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
  818. *data_size = le16_to_cpu(desc.datalen);
  819. return status;
  820. }
  821. /**
  822. * ice_get_caps - get info about the HW
  823. * @hw: pointer to the hardware structure
  824. */
  825. enum ice_status ice_get_caps(struct ice_hw *hw)
  826. {
  827. enum ice_status status;
  828. u16 data_size = 0;
  829. u16 cbuf_len;
  830. u8 retries;
  831. /* The driver doesn't know how many capabilities the device will return
  832. * so the buffer size required isn't known ahead of time. The driver
  833. * starts with cbuf_len and if this turns out to be insufficient, the
  834. * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
  835. * The driver then allocates the buffer of this size and retries the
  836. * operation. So it follows that the retry count is 2.
  837. */
  838. #define ICE_GET_CAP_BUF_COUNT 40
  839. #define ICE_GET_CAP_RETRY_COUNT 2
  840. cbuf_len = ICE_GET_CAP_BUF_COUNT *
  841. sizeof(struct ice_aqc_list_caps_elem);
  842. retries = ICE_GET_CAP_RETRY_COUNT;
  843. do {
  844. void *cbuf;
  845. cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
  846. if (!cbuf)
  847. return ICE_ERR_NO_MEMORY;
  848. status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
  849. ice_aqc_opc_list_func_caps, NULL);
  850. devm_kfree(ice_hw_to_dev(hw), cbuf);
  851. if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
  852. break;
  853. /* If ENOMEM is returned, try again with bigger buffer */
  854. cbuf_len = data_size;
  855. } while (--retries);
  856. return status;
  857. }
  858. /**
  859. * ice_aq_clear_pxe_mode
  860. * @hw: pointer to the hw struct
  861. *
  862. * Tell the firmware that the driver is taking over from PXE (0x0110).
  863. */
  864. static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
  865. {
  866. struct ice_aq_desc desc;
  867. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
  868. desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
  869. return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  870. }
  871. /**
  872. * ice_clear_pxe_mode - clear pxe operations mode
  873. * @hw: pointer to the hw struct
  874. *
  875. * Make sure all PXE mode settings are cleared, including things
  876. * like descriptor fetch/write-back mode.
  877. */
  878. void ice_clear_pxe_mode(struct ice_hw *hw)
  879. {
  880. if (ice_check_sq_alive(hw, &hw->adminq))
  881. ice_aq_clear_pxe_mode(hw);
  882. }