/* smu7_smumgr.c */
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include "pp_debug.h"
  24. #include "smumgr.h"
  25. #include "smu_ucode_xfer_vi.h"
  26. #include "smu/smu_7_1_3_d.h"
  27. #include "smu/smu_7_1_3_sh_mask.h"
  28. #include "ppatomctrl.h"
  29. #include "cgs_common.h"
  30. #include "smu7_ppsmc.h"
  31. #include "smu7_smumgr.h"
  32. #define SMU7_SMC_SIZE 0x20000
/*
 * Program the SMC indirect-access index register with a validated SRAM
 * address.  Auto-increment on the IND_11 port is disabled here, so each
 * subsequent read/write of mmSMC_IND_DATA_11 targets exactly this address.
 * Returns 0 on success, -EINVAL if the address is unaligned or the last
 * byte (smc_addr + 3) would fall outside [0, limit).
 */
static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */

	return 0;
}
  41. int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
  42. {
  43. uint32_t data;
  44. uint32_t addr;
  45. uint8_t *dest_byte;
  46. uint8_t i, data_byte[4] = {0};
  47. uint32_t *pdata = (uint32_t *)&data_byte;
  48. PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
  49. PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
  50. addr = smc_start_address;
  51. while (byte_count >= 4) {
  52. smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
  53. *dest = PP_SMC_TO_HOST_UL(data);
  54. dest += 1;
  55. byte_count -= 4;
  56. addr += 4;
  57. }
  58. if (byte_count) {
  59. smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
  60. *pdata = PP_SMC_TO_HOST_UL(data);
  61. /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
  62. dest_byte = (uint8_t *)dest;
  63. for (i = 0; i < byte_count; i++)
  64. dest_byte[i] = data_byte[i];
  65. }
  66. return 0;
  67. }
/*
 * Copy a byte buffer from host memory into SMC SRAM.  Whole dwords are
 * packed MSB-first; a trailing partial dword is merged with the existing
 * SRAM contents via read-modify-write so bytes past the end of src are
 * preserved.  Returns 0 on success, -EINVAL on alignment/range errors,
 * or the error from smu7_set_smc_sram_address().
 */
int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC addres space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Partial tail: read back the current dword so the bytes we
		 * do not own can be preserved. */
		data = 0;

		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC addres space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		/* Left-align our bytes, then splice in the untouched low
		 * bytes of the original dword. */
		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		/* NOTE(review): the index register is re-programmed here even
		 * though it was set before the data-port read above —
		 * presumably that read disturbs the index; confirm against
		 * the SMC register documentation before simplifying. */
		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}
  111. int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
  112. {
  113. static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
  114. smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
  115. return 0;
  116. }
/*
 * Report whether the SMC microcontroller is executing from its SRAM:
 * its clock must not be gated off (ck_disable == 0) and its program
 * counter must have advanced past the boot stub (PC >= 0x20100).
 */
bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
{
	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
/*
 * Send a message to the SMC and wait for it to respond.
 * A stale failure response from the previous message, or a failure
 * response to this one, is only logged — the function still returns 0.
 * Only -EINVAL (SMC not running) is reported to the caller.
 */
int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
	int ret;

	if (!smu7_is_smc_ram_running(smumgr))
		return -EINVAL;

	/* Wait until any in-flight message has been consumed. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);

	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);

	/* Wait for the SMC to acknowledge this message. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send message %x ret is %d \n", msg, ret);

	return 0;
}
/*
 * Fire-and-forget variant: post the message to the SMC mailbox without
 * checking that the SMC is running or waiting for a response.
 */
int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
{
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
	return 0;
}
  143. int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
  144. {
  145. if (!smu7_is_smc_ram_running(smumgr)) {
  146. return -EINVAL;
  147. }
  148. SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
  149. cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
  150. return smu7_send_msg_to_smc(smumgr, msg);
  151. }
/*
 * Stage a 32-bit argument, then post the message without waiting for a
 * response (see smu7_send_msg_to_smc_without_waiting()).
 */
int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
	return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
}
/*
 * Send the PPSMC_MSG_Test message with the fixed firmware offset 0x20000
 * as its argument and wait for the SMC to respond.  A bad response is
 * only logged; the function always returns 0.
 */
int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
{
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);

	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);

	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
		pr_info("Failed to send Message.\n");

	return 0;
}
/*
 * Block until the SMC clock-enable field (cken) drops to 0, i.e. until
 * the SMC goes idle.  Returns -EINVAL if the SMC is not running at all.
 */
int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
{
	if (!smu7_is_smc_ram_running(smumgr))
		return -EINVAL;

	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
	return 0;
}
/*
 * Map a SMU firmware type (UCODE_ID_*) to the corresponding CGS ucode
 * identifier.  Unknown types fall through to CGS_UCODE_ID_MAXIMUM.
 */
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SMU:
		result = CGS_UCODE_ID_SMU;
		break;
	case UCODE_ID_SMU_SK:
		result = CGS_UCODE_ID_SMU_SK;
		break;
	case UCODE_ID_SDMA0:
		result = CGS_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = CGS_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = CGS_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = CGS_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = CGS_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC:
		result = CGS_UCODE_ID_CP_MEC;
		break;
	case UCODE_ID_CP_MEC_JT1:
		result = CGS_UCODE_ID_CP_MEC_JT1;
		break;
	case UCODE_ID_CP_MEC_JT2:
		result = CGS_UCODE_ID_CP_MEC_JT2;
		break;
	case UCODE_ID_RLC_G:
		result = CGS_UCODE_ID_RLC_G;
		break;
	case UCODE_ID_MEC_STORAGE:
		result = CGS_UCODE_ID_STORAGE;
		break;
	default:
		break;
	}

	return result;
}
/*
 * Read one dword from SMC SRAM at smc_addr into *value.
 * Returns 0 on success or the error from smu7_set_smc_sram_address()
 * (unaligned address or address beyond limit).
 */
int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
	return 0;
}
/*
 * Write one dword to SMC SRAM at smc_addr.
 * Returns 0 on success or the error from smu7_set_smc_sram_address()
 * (unaligned address or address beyond limit).
 */
int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);

	if (result)
		return result;

	cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
	return 0;
}
/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
{
	uint32_t result = 0;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = UCODE_ID_SDMA0_MASK;
		break;
	case UCODE_ID_SDMA1:
		result = UCODE_ID_SDMA1_MASK;
		break;
	case UCODE_ID_CP_CE:
		result = UCODE_ID_CP_CE_MASK;
		break;
	case UCODE_ID_CP_PFP:
		result = UCODE_ID_CP_PFP_MASK;
		break;
	case UCODE_ID_CP_ME:
		result = UCODE_ID_CP_ME_MASK;
		break;
	/* All three MEC images share one load-status mask bit. */
	case UCODE_ID_CP_MEC:
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = UCODE_ID_CP_MEC_MASK;
		break;
	case UCODE_ID_RLC_G:
		result = UCODE_ID_RLC_G_MASK;
		break;
	default:
		/* Unknown type: 0 mask means "nothing to wait for". */
		pr_info("UCode type is out of range! \n");
		result = 0;
	}

	return result;
}
  270. static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
  271. uint32_t fw_type,
  272. struct SMU_Entry *entry)
  273. {
  274. int result = 0;
  275. struct cgs_firmware_info info = {0};
  276. result = cgs_get_firmware_info(smumgr->device,
  277. smu7_convert_fw_type_to_cgs(fw_type),
  278. &info);
  279. if (!result) {
  280. entry->version = info.fw_version;
  281. entry->id = (uint16_t)fw_type;
  282. entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
  283. entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
  284. entry->meta_data_addr_high = 0;
  285. entry->meta_data_addr_low = 0;
  286. /* digest need be excluded out */
  287. if (cgs_is_virtualization_enabled(smumgr->device))
  288. info.image_size -= 20;
  289. entry->data_size_byte = info.image_size;
  290. entry->num_register_entries = 0;
  291. }
  292. if (fw_type == UCODE_ID_RLC_G)
  293. entry->flags = 1;
  294. else
  295. entry->flags = 0;
  296. return 0;
  297. }
/*
 * Build the firmware table-of-contents in the shared header buffer and
 * ask the SMC to load the listed microcode images.
 * Returns 0 on success (or when reload is skipped), -EINVAL if any TOC
 * entry cannot be populated.  Failure of the final LoadUcodes message is
 * only logged.
 */
int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
	uint32_t fw_to_load;
	int result = 0;
	struct SMU_DRAMData_TOC *toc;

	if (!smumgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	/* Clear the per-ucode load-status word so smu7_check_fw_load_finish()
	 * observes fresh completion bits. */
	if (smu_data->soft_regs_start)
		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(smumgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		/* Bare-metal only: tell the SMC where its scratch buffer lives. */
		if (!cgs_is_virtualization_enabled(smumgr->device)) {
			smu7_send_msg_to_smc_with_parameter(smumgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						smu_data->smu_buffer.mc_addr_high);
			smu7_send_msg_to_smc_with_parameter(smumgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						smu_data->smu_buffer.mc_addr_low);
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		/* Topaz additionally requests the MEC jump tables. */
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	/* One TOC entry per firmware image; any lookup failure aborts. */
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
			UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.", return -EINVAL);
	if (cgs_is_virtualization_enabled(smumgr->device))
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);

	/* Hand the TOC address to the SMC, then kick off the load. */
	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);

	if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
		pr_err("Fail to Request SMU Load uCode");

	return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
	/* All bits of the type's mask must be set in UcodeLoadStatus. */
	uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
	uint32_t ret;

	ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
				smu_data->soft_regs_start + smum_get_offsetof(smumgr,
				SMU_SoftRegisters, UcodeLoadStatus),
				fw_mask, fw_mask);

	return ret;
}
/* Reload firmware by re-running the backend's start_smu sequence. */
int smu7_reload_firmware(struct pp_smumgr *smumgr)
{
	return smumgr->smumgr_funcs->start_smu(smumgr);
}
  396. static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
  397. {
  398. uint32_t byte_count = length;
  399. PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
  400. cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
  401. SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
  402. for (; byte_count >= 4; byte_count -= 4)
  403. cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
  404. SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
  405. PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
  406. return 0;
  407. }
/*
 * Look up the SMU firmware image (signed with the hard or soft key,
 * depending on smu_data->security_hard_key) and upload it into SMC SRAM.
 * Also latches the "kicker" flag reported by CGS onto the smumgr.
 * Returns the result of smu7_upload_smc_firmware_data().
 * NOTE(review): cgs_get_firmware_info()'s return value is not checked
 * here — presumably info stays zeroed on failure; verify.
 */
int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
	struct cgs_firmware_info info = {0};

	if (smu_data->security_hard_key == 1)
		cgs_get_firmware_info(smumgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
	else
		cgs_get_firmware_info(smumgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

	smumgr->is_kicker = info.is_kicker;

	result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

	return result;
}
/*
 * Allocate the GPU-visible buffers the SMU7 backend needs: a page-rounded
 * header buffer for the firmware TOC and, on bare metal only, a 200-page
 * scratch buffer for the SMC.  On allocation failure the backend is freed
 * and -EINVAL returned.
 * NOTE(review): smu_allocate_memory()'s return value is not checked; the
 * kaddr NULL-checks below presumably cover the failure case — confirm.
 */
int smu7_init(struct pp_smumgr *smumgr)
{
	struct smu7_smumgr *smu_data;
	uint8_t *internal_buf;
	uint64_t mc_addr = 0;

	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(smumgr->backend);
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

	/* Allocate FW image data structure and header buffer and
	 * send the header buffer address to SMU */
	smu_allocate_memory(smumgr->device,
		smu_data->header_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->header_buffer.kaddr,
		&smu_data->header_buffer.handle);

	smu_data->header = smu_data->header_buffer.kaddr;
	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)smu_data->header_buffer.handle);
		return -EINVAL);

	/* Virtual functions do not get the SMC scratch buffer. */
	if (cgs_is_virtualization_enabled(smumgr->device))
		return 0;

	smu_data->smu_buffer.data_size = 200*4096;
	smu_allocate_memory(smumgr->device,
		smu_data->smu_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->smu_buffer.kaddr,
		&smu_data->smu_buffer.handle);
	internal_buf = smu_data->smu_buffer.kaddr;
	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != internal_buf),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)smu_data->smu_buffer.handle);
		return -EINVAL);

	return 0;
}
  471. int smu7_smu_fini(struct pp_smumgr *smumgr)
  472. {
  473. if (smumgr->backend) {
  474. kfree(smumgr->backend);
  475. smumgr->backend = NULL;
  476. }
  477. cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
  478. return 0;
  479. }