  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_common.h"
  4. #include "ice_sched.h"
  5. #include "ice_adminq_cmd.h"
  6. #define ICE_PF_RESET_WAIT_COUNT 200
  7. /**
  8. * ice_set_mac_type - Sets MAC type
  9. * @hw: pointer to the HW structure
  10. *
  11. * This function sets the MAC type of the adapter based on the
  12. * vendor ID and device ID stored in the hw structure.
  13. */
  14. static enum ice_status ice_set_mac_type(struct ice_hw *hw)
  15. {
  16. if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
  17. return ICE_ERR_DEVICE_NOT_SUPPORTED;
  18. hw->mac_type = ICE_MAC_GENERIC;
  19. return 0;
  20. }
  21. /**
  22. * ice_clear_pf_cfg - Clear PF configuration
  23. * @hw: pointer to the hardware structure
  24. */
  25. enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
  26. {
  27. struct ice_aq_desc desc;
  28. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
  29. return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  30. }
  31. /**
  32. * ice_init_hw - main hardware initialization routine
  33. * @hw: pointer to the hardware structure
  34. */
  35. enum ice_status ice_init_hw(struct ice_hw *hw)
  36. {
  37. enum ice_status status;
  38. /* Set MAC type based on DeviceID */
  39. status = ice_set_mac_type(hw);
  40. if (status)
  41. return status;
  42. hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
  43. PF_FUNC_RID_FUNC_NUM_M) >>
  44. PF_FUNC_RID_FUNC_NUM_S;
  45. status = ice_reset(hw, ICE_RESET_PFR);
  46. if (status)
  47. return status;
  48. status = ice_init_all_ctrlq(hw);
  49. if (status)
  50. goto err_unroll_cqinit;
  51. status = ice_clear_pf_cfg(hw);
  52. if (status)
  53. goto err_unroll_cqinit;
  54. ice_clear_pxe_mode(hw);
  55. status = ice_init_nvm(hw);
  56. if (status)
  57. goto err_unroll_cqinit;
  58. status = ice_get_caps(hw);
  59. if (status)
  60. goto err_unroll_cqinit;
  61. hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
  62. sizeof(*hw->port_info), GFP_KERNEL);
  63. if (!hw->port_info) {
  64. status = ICE_ERR_NO_MEMORY;
  65. goto err_unroll_cqinit;
  66. }
  67. /* set the back pointer to hw */
  68. hw->port_info->hw = hw;
  69. /* Initialize port_info struct with switch configuration data */
  70. status = ice_get_initial_sw_cfg(hw);
  71. if (status)
  72. goto err_unroll_alloc;
  73. /* Query the allocated resources for tx scheduler */
  74. status = ice_sched_query_res_alloc(hw);
  75. if (status) {
  76. ice_debug(hw, ICE_DBG_SCHED,
  77. "Failed to get scheduler allocated resources\n");
  78. goto err_unroll_alloc;
  79. }
  80. return 0;
  81. err_unroll_alloc:
  82. devm_kfree(ice_hw_to_dev(hw), hw->port_info);
  83. err_unroll_cqinit:
  84. ice_shutdown_all_ctrlq(hw);
  85. return status;
  86. }
  87. /**
  88. * ice_deinit_hw - unroll initialization operations done by ice_init_hw
  89. * @hw: pointer to the hardware structure
  90. */
  91. void ice_deinit_hw(struct ice_hw *hw)
  92. {
  93. ice_sched_cleanup_all(hw);
  94. ice_shutdown_all_ctrlq(hw);
  95. if (hw->port_info) {
  96. devm_kfree(ice_hw_to_dev(hw), hw->port_info);
  97. hw->port_info = NULL;
  98. }
  99. }
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Two-stage poll: first wait for the device to report the Active state
 * in GLGEN_RSTAT, then wait for the CORER/GLOBR "done" bits in
 * GLNVM_ULD. Returns 0 once both conditions hold, or
 * ICE_ERR_RESET_FAILED if either poll times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	/* cnt reaching grst_delay means the loop above never saw the
	 * DEVSTATE bit clear
	 */
	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

/* Both CORER and GLOBR "done" bits must be set; this mask is also used
 * by ice_pf_reset() below to decide whether a global reset is pending.
 */
#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
  144. /**
  145. * ice_pf_reset - Reset the PF
  146. * @hw: pointer to the hardware structure
  147. *
  148. * If a global reset has been triggered, this function checks
  149. * for its completion and then issues the PF reset
  150. */
  151. static enum ice_status ice_pf_reset(struct ice_hw *hw)
  152. {
  153. u32 cnt, reg;
  154. /* If at function entry a global reset was already in progress, i.e.
  155. * state is not 'device active' or any of the reset done bits are not
  156. * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
  157. * global reset is done.
  158. */
  159. if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
  160. (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
  161. /* poll on global reset currently in progress until done */
  162. if (ice_check_reset(hw))
  163. return ICE_ERR_RESET_FAILED;
  164. return 0;
  165. }
  166. /* Reset the PF */
  167. reg = rd32(hw, PFGEN_CTRL);
  168. wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
  169. for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
  170. reg = rd32(hw, PFGEN_CTRL);
  171. if (!(reg & PFGEN_CTRL_PFSWR_M))
  172. break;
  173. mdelay(1);
  174. }
  175. if (cnt == ICE_PF_RESET_WAIT_COUNT) {
  176. ice_debug(hw, ICE_DBG_INIT,
  177. "PF reset polling failed to complete.\n");
  178. return ICE_ERR_RESET_FAILED;
  179. }
  180. return 0;
  181. }
  182. /**
  183. * ice_reset - Perform different types of reset
  184. * @hw: pointer to the hardware structure
  185. * @req: reset request
  186. *
  187. * This function triggers a reset as specified by the req parameter.
  188. *
  189. * Note:
  190. * If anything other than a PF reset is triggered, PXE mode is restored.
  191. * This has to be cleared using ice_clear_pxe_mode again, once the AQ
  192. * interface has been restored in the rebuild flow.
  193. */
  194. enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  195. {
  196. u32 val = 0;
  197. switch (req) {
  198. case ICE_RESET_PFR:
  199. return ice_pf_reset(hw);
  200. case ICE_RESET_CORER:
  201. ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
  202. val = GLGEN_RTRIG_CORER_M;
  203. break;
  204. case ICE_RESET_GLOBR:
  205. ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
  206. val = GLGEN_RTRIG_GLOBR_M;
  207. break;
  208. }
  209. val |= rd32(hw, GLGEN_RTRIG);
  210. wr32(hw, GLGEN_RTRIG, val);
  211. ice_flush(hw);
  212. /* wait for the FW to be ready */
  213. return ice_check_reset(hw);
  214. }
  215. /**
  216. * ice_debug_cq
  217. * @hw: pointer to the hardware structure
  218. * @mask: debug mask
  219. * @desc: pointer to control queue descriptor
  220. * @buf: pointer to command buffer
  221. * @buf_len: max length of buf
  222. *
  223. * Dumps debug log about control command with descriptor contents.
  224. */
  225. void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
  226. void *buf, u16 buf_len)
  227. {
  228. struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
  229. u16 len;
  230. #ifndef CONFIG_DYNAMIC_DEBUG
  231. if (!(mask & hw->debug_mask))
  232. return;
  233. #endif
  234. if (!desc)
  235. return;
  236. len = le16_to_cpu(cq_desc->datalen);
  237. ice_debug(hw, mask,
  238. "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
  239. le16_to_cpu(cq_desc->opcode),
  240. le16_to_cpu(cq_desc->flags),
  241. le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
  242. ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
  243. le32_to_cpu(cq_desc->cookie_high),
  244. le32_to_cpu(cq_desc->cookie_low));
  245. ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
  246. le32_to_cpu(cq_desc->params.generic.param0),
  247. le32_to_cpu(cq_desc->params.generic.param1));
  248. ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
  249. le32_to_cpu(cq_desc->params.generic.addr_high),
  250. le32_to_cpu(cq_desc->params.generic.addr_low));
  251. if (buf && cq_desc->datalen != 0) {
  252. ice_debug(hw, mask, "Buffer:\n");
  253. if (buf_len < len)
  254. len = buf_len;
  255. ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
  256. }
  257. }
  258. /* FW Admin Queue command wrappers */
  259. /**
  260. * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
  261. * @hw: pointer to the hw struct
  262. * @desc: descriptor describing the command
  263. * @buf: buffer to use for indirect commands (NULL for direct commands)
  264. * @buf_size: size of buffer for indirect commands (0 for direct commands)
  265. * @cd: pointer to command details structure
  266. *
  267. * Helper function to send FW Admin Queue commands to the FW Admin Queue.
  268. */
  269. enum ice_status
  270. ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
  271. u16 buf_size, struct ice_sq_cd *cd)
  272. {
  273. return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
  274. }
  275. /**
  276. * ice_aq_get_fw_ver
  277. * @hw: pointer to the hw struct
  278. * @cd: pointer to command details structure or NULL
  279. *
  280. * Get the firmware version (0x0001) from the admin queue commands
  281. */
  282. enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
  283. {
  284. struct ice_aqc_get_ver *resp;
  285. struct ice_aq_desc desc;
  286. enum ice_status status;
  287. resp = &desc.params.get_ver;
  288. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
  289. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  290. if (!status) {
  291. hw->fw_branch = resp->fw_branch;
  292. hw->fw_maj_ver = resp->fw_major;
  293. hw->fw_min_ver = resp->fw_minor;
  294. hw->fw_patch = resp->fw_patch;
  295. hw->fw_build = le32_to_cpu(resp->fw_build);
  296. hw->api_branch = resp->api_branch;
  297. hw->api_maj_ver = resp->api_major;
  298. hw->api_min_ver = resp->api_minor;
  299. hw->api_patch = resp->api_patch;
  300. }
  301. return status;
  302. }
  303. /**
  304. * ice_aq_q_shutdown
  305. * @hw: pointer to the hw struct
  306. * @unloading: is the driver unloading itself
  307. *
  308. * Tell the Firmware that we're shutting down the AdminQ and whether
  309. * or not the driver is unloading as well (0x0003).
  310. */
  311. enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
  312. {
  313. struct ice_aqc_q_shutdown *cmd;
  314. struct ice_aq_desc desc;
  315. cmd = &desc.params.q_shutdown;
  316. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
  317. if (unloading)
  318. cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
  319. return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  320. }
  321. /**
  322. * ice_aq_req_res
  323. * @hw: pointer to the hw struct
  324. * @res: resource id
  325. * @access: access type
  326. * @sdp_number: resource number
  327. * @timeout: the maximum time in ms that the driver may hold the resource
  328. * @cd: pointer to command details structure or NULL
  329. *
  330. * requests common resource using the admin queue commands (0x0008)
  331. */
  332. static enum ice_status
  333. ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
  334. enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
  335. struct ice_sq_cd *cd)
  336. {
  337. struct ice_aqc_req_res *cmd_resp;
  338. struct ice_aq_desc desc;
  339. enum ice_status status;
  340. cmd_resp = &desc.params.res_owner;
  341. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
  342. cmd_resp->res_id = cpu_to_le16(res);
  343. cmd_resp->access_type = cpu_to_le16(access);
  344. cmd_resp->res_number = cpu_to_le32(sdp_number);
  345. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  346. /* The completion specifies the maximum time in ms that the driver
  347. * may hold the resource in the Timeout field.
  348. * If the resource is held by someone else, the command completes with
  349. * busy return value and the timeout field indicates the maximum time
  350. * the current owner of the resource has to free it.
  351. */
  352. if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
  353. *timeout = le32_to_cpu(cmd_resp->timeout);
  354. return status;
  355. }
  356. /**
  357. * ice_aq_release_res
  358. * @hw: pointer to the hw struct
  359. * @res: resource id
  360. * @sdp_number: resource number
  361. * @cd: pointer to command details structure or NULL
  362. *
  363. * release common resource using the admin queue commands (0x0009)
  364. */
  365. static enum ice_status
  366. ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
  367. struct ice_sq_cd *cd)
  368. {
  369. struct ice_aqc_req_res *cmd;
  370. struct ice_aq_desc desc;
  371. cmd = &desc.params.res_owner;
  372. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
  373. cmd->res_id = cpu_to_le16(res);
  374. cmd->res_number = cpu_to_le32(sdp_number);
  375. return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  376. }
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 * If the resource is busy, it retries every ICE_RES_POLLING_DELAY_MS
 * until the current owner's timeout expires. Returns 0 when the lock is
 * acquired, ICE_ERR_AQ_NO_WORK when another driver already holds the
 * resource and did the work, or another ICE_ERR_* status on timeout.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;	/* ms the current owner may still hold the lock */
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		/* budget shrinks by the polling delay each iteration */
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
  435. /**
  436. * ice_release_res
  437. * @hw: pointer to the HW structure
  438. * @res: resource id
  439. *
  440. * This function will release a resource using the proper Admin Command.
  441. */
  442. void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
  443. {
  444. enum ice_status status;
  445. u32 total_delay = 0;
  446. status = ice_aq_release_res(hw, res, 0, NULL);
  447. /* there are some rare cases when trying to release the resource
  448. * results in an admin Q timeout, so handle them correctly
  449. */
  450. while ((status == ICE_ERR_AQ_TIMEOUT) &&
  451. (total_delay < hw->adminq.sq_cmd_timeout)) {
  452. mdelay(1);
  453. status = ice_aq_release_res(hw, res, 0, NULL);
  454. total_delay++;
  455. }
  456. }
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 * Exactly one of dev_p/func_p is selected by @opc; fields shared by both
 * (RSS, queues, MSI-X, MTU) are written through the common_cap alias.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	/* Route the parse to either the device-level or the per-function
	 * capability structure based on which list command was issued.
	 */
	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		/* each record carries the capability id plus three
		 * id/number fields whose meaning depends on the capability
		 */
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			/* unknown capabilities are logged and skipped */
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}
  564. /**
  565. * ice_aq_discover_caps - query function/device capabilities
  566. * @hw: pointer to the hw struct
  567. * @buf: a virtual buffer to hold the capabilities
  568. * @buf_size: Size of the virtual buffer
  569. * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
  570. * @opc: capabilities type to discover - pass in the command opcode
  571. * @cd: pointer to command details structure or NULL
  572. *
  573. * Get the function(0x000a)/device(0x000b) capabilities description from
  574. * the firmware.
  575. */
  576. static enum ice_status
  577. ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
  578. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  579. {
  580. struct ice_aqc_list_caps *cmd;
  581. struct ice_aq_desc desc;
  582. enum ice_status status;
  583. cmd = &desc.params.get_cap;
  584. if (opc != ice_aqc_opc_list_func_caps &&
  585. opc != ice_aqc_opc_list_dev_caps)
  586. return ICE_ERR_PARAM;
  587. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  588. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  589. if (!status)
  590. ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
  591. *data_size = le16_to_cpu(desc.datalen);
  592. return status;
  593. }
  594. /**
  595. * ice_get_caps - get info about the HW
  596. * @hw: pointer to the hardware structure
  597. */
  598. enum ice_status ice_get_caps(struct ice_hw *hw)
  599. {
  600. enum ice_status status;
  601. u16 data_size = 0;
  602. u16 cbuf_len;
  603. u8 retries;
  604. /* The driver doesn't know how many capabilities the device will return
  605. * so the buffer size required isn't known ahead of time. The driver
  606. * starts with cbuf_len and if this turns out to be insufficient, the
  607. * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
  608. * The driver then allocates the buffer of this size and retries the
  609. * operation. So it follows that the retry count is 2.
  610. */
  611. #define ICE_GET_CAP_BUF_COUNT 40
  612. #define ICE_GET_CAP_RETRY_COUNT 2
  613. cbuf_len = ICE_GET_CAP_BUF_COUNT *
  614. sizeof(struct ice_aqc_list_caps_elem);
  615. retries = ICE_GET_CAP_RETRY_COUNT;
  616. do {
  617. void *cbuf;
  618. cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
  619. if (!cbuf)
  620. return ICE_ERR_NO_MEMORY;
  621. status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
  622. ice_aqc_opc_list_func_caps, NULL);
  623. devm_kfree(ice_hw_to_dev(hw), cbuf);
  624. if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
  625. break;
  626. /* If ENOMEM is returned, try again with bigger buffer */
  627. cbuf_len = data_size;
  628. } while (--retries);
  629. return status;
  630. }
  631. /**
  632. * ice_aq_clear_pxe_mode
  633. * @hw: pointer to the hw struct
  634. *
  635. * Tell the firmware that the driver is taking over from PXE (0x0110).
  636. */
  637. static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
  638. {
  639. struct ice_aq_desc desc;
  640. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
  641. desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
  642. return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  643. }
  644. /**
  645. * ice_clear_pxe_mode - clear pxe operations mode
  646. * @hw: pointer to the hw struct
  647. *
  648. * Make sure all PXE mode settings are cleared, including things
  649. * like descriptor fetch/write-back mode.
  650. */
  651. void ice_clear_pxe_mode(struct ice_hw *hw)
  652. {
  653. if (ice_check_sq_alive(hw, &hw->adminq))
  654. ice_aq_clear_pxe_mode(hw);
  655. }