ice_common.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
	ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
	return 0;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
static enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);
	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
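
/*
 * Illustrative sketch only, not part of this file's upstream source: callers
 * typically consult pi->phy.get_link_info (which the function above clears)
 * to decide whether the cached link status needs refreshing before use. The
 * helper name below is hypothetical.
 */
static enum ice_status __maybe_unused
ice_example_refresh_link_info(struct ice_port_info *pi)
{
	enum ice_status status = 0;

	/* only hit the admin queue when the cached link info is stale */
	if (pi->phy.get_link_info)
		status = ice_aq_get_link_info(pi, false, NULL, NULL);

	return status;
}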
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	/* Query the allocated resources for tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* Get port MAC information */
	mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
	mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
	if (!mac_buf) {
		/* without setting an error here, an allocation failure would
		 * fall through to the unroll path with a stale success status
		 */
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_sched;

	return 0;

err_unroll_sched:
	ice_sched_cleanup_all(hw);

err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);

err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
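
/*
 * Illustrative sketch only, not part of this file's upstream source: the
 * ordering implied by the note above -- after a CoreR/GlobalR the admin
 * queue interface must be restored before PXE mode can be cleared again.
 * The helper name below is hypothetical.
 */
static enum ice_status __maybe_unused
ice_example_rebuild_after_corer(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_reset(hw, ICE_RESET_CORER);
	if (status)
		return status;

	/* bring the control queues back first... */
	status = ice_init_all_ctrlq(hw);
	if (status)
		return status;

	/* ...then undo the PXE mode settings the reset restored */
	ice_clear_pxe_mode(hw);
	return 0;
}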
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * requests common resource using the admin queue commands (0x0008)
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * busy return value and the timeout field indicates the maximum time
	 * the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
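
/*
 * Illustrative sketch only, not part of this file's upstream source: the
 * acquire/use/release pattern expected of ice_acquire_res()/ice_release_res()
 * callers. ICE_NVM_RES_ID is assumed to be a valid enum ice_aq_res_ids value,
 * and the helper name is hypothetical.
 */
static enum ice_status __maybe_unused
ice_example_with_nvm_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ);
	if (status)
		/* includes ICE_ERR_AQ_NO_WORK: nothing left to do */
		return status;

	/* ... access the shared resource through the admin queue here ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return 0;
}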
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}
/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;
	u16 data_size = 0;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
	 * The driver then allocates the buffer of this size and retries the
	 * operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cbuf_len = ICE_GET_CAP_BUF_COUNT *
		sizeof(struct ice_aqc_list_caps_elem);

	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
					      ice_aqc_opc_list_func_caps, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
		cbuf_len = data_size;
	} while (--retries);

	return status;
}
/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}