/* cz_smumgr.c — SMU manager for Carrizo-family (CZ/Stoney) parts */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
  23. #include <linux/types.h>
  24. #include <linux/kernel.h>
  25. #include <linux/slab.h>
  26. #include <linux/gfp.h>
  27. #include "linux/delay.h"
  28. #include "cgs_common.h"
  29. #include "smu/smu_8_0_d.h"
  30. #include "smu/smu_8_0_sh_mask.h"
  31. #include "smu8.h"
  32. #include "smu8_fusion.h"
  33. #include "cz_smumgr.h"
  34. #include "cz_ppsmc.h"
  35. #include "smu_ucode_xfer_cz.h"
  36. #include "gca/gfx_8_0_d.h"
  37. #include "gca/gfx_8_0_sh_mask.h"
  38. #include "smumgr.h"
  39. #define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
/*
 * Firmwares whose GPU addresses are collected into driver_buffer[] by
 * cz_smu_populate_firmware_entries() and then loaded by the SMU through
 * TOC ucode-load tasks.
 */
static const enum cz_scratch_entry firmware_list[] = {
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
  50. static int cz_smum_get_argument(struct pp_smumgr *smumgr)
  51. {
  52. if (smumgr == NULL || smumgr->device == NULL)
  53. return -EINVAL;
  54. return cgs_read_register(smumgr->device,
  55. mmSMU_MP1_SRBM2P_ARG_0);
  56. }
  57. static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
  58. uint16_t msg)
  59. {
  60. int result = 0;
  61. if (smumgr == NULL || smumgr->device == NULL)
  62. return -EINVAL;
  63. result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
  64. SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
  65. if (result != 0) {
  66. pr_err("cz_send_msg_to_smc_async failed\n");
  67. return result;
  68. }
  69. cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
  70. cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
  71. return 0;
  72. }
  73. /* Send a message to the SMC, and wait for its response.*/
  74. static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
  75. {
  76. int result = 0;
  77. result = cz_send_msg_to_smc_async(smumgr, msg);
  78. if (result != 0)
  79. return result;
  80. return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
  81. SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
  82. }
  83. static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
  84. uint32_t smc_address, uint32_t limit)
  85. {
  86. if (smumgr == NULL || smumgr->device == NULL)
  87. return -EINVAL;
  88. if (0 != (3 & smc_address)) {
  89. pr_err("SMC address must be 4 byte aligned\n");
  90. return -EINVAL;
  91. }
  92. if (limit <= (smc_address + 3)) {
  93. pr_err("SMC address beyond the SMC RAM area\n");
  94. return -EINVAL;
  95. }
  96. cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
  97. SMN_MP1_SRAM_START_ADDR + smc_address);
  98. return 0;
  99. }
  100. static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
  101. uint32_t smc_address, uint32_t value, uint32_t limit)
  102. {
  103. int result;
  104. if (smumgr == NULL || smumgr->device == NULL)
  105. return -EINVAL;
  106. result = cz_set_smc_sram_address(smumgr, smc_address, limit);
  107. if (!result)
  108. cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
  109. return result;
  110. }
  111. static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
  112. uint16_t msg, uint32_t parameter)
  113. {
  114. if (smumgr == NULL || smumgr->device == NULL)
  115. return -EINVAL;
  116. cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
  117. return cz_send_msg_to_smc(smumgr, msg);
  118. }
/*
 * Poll UcodeLoadStatus in the SMU firmware header until all bits in
 * @firmware are set, or smumgr->usec_timeout microseconds elapse.
 * Returns 0 when every requested firmware reports loaded, -EINVAL on
 * timeout or bad arguments.
 */
static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
		   uint32_t firmware)
{
	int i;
	/* SRAM address of the UcodeLoadStatus field in the fw header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	/* Set the indirect index once; every data read below hits it. */
	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < smumgr->usec_timeout; i++) {
		/* Done when all requested firmware bits are set. */
		if (firmware ==
			(cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
			break;
		udelay(1);
	}

	if (i >= smumgr->usec_timeout) {
		pr_err("SMU check loaded firmware failed.\n");
		return -EINVAL;
	}

	return 0;
}
  141. static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
  142. {
  143. uint32_t reg_data;
  144. uint32_t tmp;
  145. int ret = 0;
  146. struct cgs_firmware_info info = {0};
  147. struct cz_smumgr *cz_smu;
  148. if (smumgr == NULL || smumgr->device == NULL)
  149. return -EINVAL;
  150. cz_smu = (struct cz_smumgr *)smumgr->backend;
  151. ret = cgs_get_firmware_info(smumgr->device,
  152. CGS_UCODE_ID_CP_MEC, &info);
  153. if (ret)
  154. return -EINVAL;
  155. /* Disable MEC parsing/prefetching */
  156. tmp = cgs_read_register(smumgr->device,
  157. mmCP_MEC_CNTL);
  158. tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
  159. tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
  160. cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
  161. tmp = cgs_read_register(smumgr->device,
  162. mmCP_CPC_IC_BASE_CNTL);
  163. tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
  164. tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
  165. tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
  166. tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
  167. cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
  168. reg_data = smu_lower_32_bits(info.mc_addr) &
  169. SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
  170. cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
  171. reg_data = smu_upper_32_bits(info.mc_addr) &
  172. SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
  173. cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
  174. return 0;
  175. }
/*
 * Translate a driver-side cz_scratch_entry into the SMU-side task argument
 * (UCODE_ID_* for ucode loads, TASK_ARG_* for scratch/data tasks).
 * On Stoney, which has one SDMA engine and one MEC jump table, SDMA1 and
 * MEC_JT2 map back onto SDMA0/MEC_JT1.
 * Returns 0 for enum values with no mapping (none are expected here).
 */
static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		/* Stoney: single SDMA engine. */
		if (smumgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_SDMA0;
		else
			ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		/* Stoney: single MEC jump table. */
		if (smumgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_CP_MEC_JT1;
		else
			ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	/* All register-save/restore data entries share one task argument. */
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
  246. static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
  247. {
  248. enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
  249. switch (fw_type) {
  250. case UCODE_ID_SDMA0:
  251. result = CGS_UCODE_ID_SDMA0;
  252. break;
  253. case UCODE_ID_SDMA1:
  254. result = CGS_UCODE_ID_SDMA1;
  255. break;
  256. case UCODE_ID_CP_CE:
  257. result = CGS_UCODE_ID_CP_CE;
  258. break;
  259. case UCODE_ID_CP_PFP:
  260. result = CGS_UCODE_ID_CP_PFP;
  261. break;
  262. case UCODE_ID_CP_ME:
  263. result = CGS_UCODE_ID_CP_ME;
  264. break;
  265. case UCODE_ID_CP_MEC_JT1:
  266. result = CGS_UCODE_ID_CP_MEC_JT1;
  267. break;
  268. case UCODE_ID_CP_MEC_JT2:
  269. result = CGS_UCODE_ID_CP_MEC_JT2;
  270. break;
  271. case UCODE_ID_RLC_G:
  272. result = CGS_UCODE_ID_RLC_G;
  273. break;
  274. default:
  275. break;
  276. }
  277. return result;
  278. }
/*
 * Append one scratch-buffer task (@type: save/load/initialize) for @fw_enum
 * to the TOC.  @is_last terminates the task chain; otherwise the task links
 * to the next TOC slot.  The scratch entry must already have been registered
 * via cz_smu_populate_single_scratch_entry().
 * Returns 0 on success, -EINVAL if @fw_enum has no scratch entry.
 */
static int cz_smu_populate_single_scratch_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	/* Claim the next TOC slot; the post-increment leaves
	 * toc_entry_used_count naming the slot after this one. */
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
	/* Because of the increment above, used_count is the index of the
	 * task that follows this one in the chain. */
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Find the scratch entry registered for this firmware id. */
	for (i = 0; i < cz_smu->scratch_buffer_length; i++)
		if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		/* IH register restore needs its metadata command pre-seeded
		 * so the SMU replays it on load. */
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
/*
 * Append one ucode-load task for @fw_enum to the TOC.  @is_last terminates
 * the task chain.  The firmware's GPU address must already be recorded in
 * driver_buffer[] by cz_smu_populate_firmware_entries().
 * Returns 0 on success, -EINVAL if the firmware was never recorded.
 */
static int cz_smu_populate_single_ucode_load_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	/* Claim the next TOC slot; after the post-increment, used_count
	 * names the following slot (used for the ->next link below). */
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Find the recorded firmware image for this id. */
	for (i = 0; i < cz_smu->driver_buffer_length; i++)
		if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->driver_buffer[i].data_size;

	return 0;
}
  333. static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
  334. {
  335. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  336. cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
  337. cz_smu_populate_single_scratch_task(smumgr,
  338. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
  339. TASK_TYPE_UCODE_SAVE, true);
  340. return 0;
  341. }
  342. static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
  343. {
  344. int i;
  345. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  346. struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
  347. for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
  348. toc->JobList[i] = (uint8_t)IGNORE_JOB;
  349. return 0;
  350. }
  351. static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
  352. {
  353. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  354. struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
  355. toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
  356. cz_smu_populate_single_scratch_task(smumgr,
  357. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
  358. TASK_TYPE_UCODE_SAVE, false);
  359. cz_smu_populate_single_scratch_task(smumgr,
  360. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
  361. TASK_TYPE_UCODE_SAVE, true);
  362. return 0;
  363. }
  364. static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
  365. {
  366. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  367. struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
  368. toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
  369. cz_smu_populate_single_ucode_load_task(smumgr,
  370. CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
  371. cz_smu_populate_single_ucode_load_task(smumgr,
  372. CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
  373. cz_smu_populate_single_ucode_load_task(smumgr,
  374. CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
  375. cz_smu_populate_single_ucode_load_task(smumgr,
  376. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
  377. if (smumgr->chip_id == CHIP_STONEY)
  378. cz_smu_populate_single_ucode_load_task(smumgr,
  379. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
  380. else
  381. cz_smu_populate_single_ucode_load_task(smumgr,
  382. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
  383. cz_smu_populate_single_ucode_load_task(smumgr,
  384. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
  385. /* populate scratch */
  386. cz_smu_populate_single_scratch_task(smumgr,
  387. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
  388. TASK_TYPE_UCODE_LOAD, false);
  389. cz_smu_populate_single_scratch_task(smumgr,
  390. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
  391. TASK_TYPE_UCODE_LOAD, false);
  392. cz_smu_populate_single_scratch_task(smumgr,
  393. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
  394. TASK_TYPE_UCODE_LOAD, true);
  395. return 0;
  396. }
  397. static int cz_smu_construct_toc_for_power_profiling(
  398. struct pp_smumgr *smumgr)
  399. {
  400. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  401. cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
  402. cz_smu_populate_single_scratch_task(smumgr,
  403. CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
  404. TASK_TYPE_INITIALIZE, true);
  405. return 0;
  406. }
  407. static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
  408. {
  409. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  410. cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
  411. cz_smu_populate_single_ucode_load_task(smumgr,
  412. CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
  413. if (smumgr->chip_id != CHIP_STONEY)
  414. cz_smu_populate_single_ucode_load_task(smumgr,
  415. CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
  416. cz_smu_populate_single_ucode_load_task(smumgr,
  417. CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
  418. cz_smu_populate_single_ucode_load_task(smumgr,
  419. CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
  420. cz_smu_populate_single_ucode_load_task(smumgr,
  421. CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
  422. cz_smu_populate_single_ucode_load_task(smumgr,
  423. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
  424. if (smumgr->chip_id != CHIP_STONEY)
  425. cz_smu_populate_single_ucode_load_task(smumgr,
  426. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
  427. cz_smu_populate_single_ucode_load_task(smumgr,
  428. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
  429. return 0;
  430. }
  431. static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
  432. {
  433. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  434. cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
  435. cz_smu_populate_single_scratch_task(smumgr,
  436. CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
  437. TASK_TYPE_INITIALIZE, true);
  438. return 0;
  439. }
  440. static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
  441. {
  442. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  443. cz_smu->toc_entry_used_count = 0;
  444. cz_smu_initialize_toc_empty_job_list(smumgr);
  445. cz_smu_construct_toc_for_rlc_aram_save(smumgr);
  446. cz_smu_construct_toc_for_vddgfx_enter(smumgr);
  447. cz_smu_construct_toc_for_vddgfx_exit(smumgr);
  448. cz_smu_construct_toc_for_power_profiling(smumgr);
  449. cz_smu_construct_toc_for_bootup(smumgr);
  450. cz_smu_construct_toc_for_clock_table(smumgr);
  451. return 0;
  452. }
  453. static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
  454. {
  455. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  456. uint32_t firmware_type;
  457. uint32_t i;
  458. int ret;
  459. enum cgs_ucode_id ucode_id;
  460. struct cgs_firmware_info info = {0};
  461. cz_smu->driver_buffer_length = 0;
  462. for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
  463. firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
  464. firmware_list[i]);
  465. ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
  466. ret = cgs_get_firmware_info(smumgr->device,
  467. ucode_id, &info);
  468. if (ret == 0) {
  469. cz_smu->driver_buffer[i].mc_addr_high =
  470. smu_upper_32_bits(info.mc_addr);
  471. cz_smu->driver_buffer[i].mc_addr_low =
  472. smu_lower_32_bits(info.mc_addr);
  473. cz_smu->driver_buffer[i].data_size = info.image_size;
  474. cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
  475. cz_smu->driver_buffer_length++;
  476. }
  477. }
  478. return 0;
  479. }
  480. static int cz_smu_populate_single_scratch_entry(
  481. struct pp_smumgr *smumgr,
  482. enum cz_scratch_entry scratch_type,
  483. uint32_t ulsize_byte,
  484. struct cz_buffer_entry *entry)
  485. {
  486. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  487. long long mc_addr =
  488. ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
  489. | cz_smu->smu_buffer.mc_addr_low;
  490. uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
  491. mc_addr += cz_smu->smu_buffer_used_bytes;
  492. entry->data_size = ulsize_byte;
  493. entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
  494. cz_smu->smu_buffer_used_bytes;
  495. entry->mc_addr_low = smu_lower_32_bits(mc_addr);
  496. entry->mc_addr_high = smu_upper_32_bits(mc_addr);
  497. entry->firmware_ID = scratch_type;
  498. cz_smu->smu_buffer_used_bytes += ulsize_aligned;
  499. return 0;
  500. }
  501. static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
  502. {
  503. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  504. unsigned long i;
  505. for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
  506. if (cz_smu->scratch_buffer[i].firmware_ID
  507. == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
  508. break;
  509. }
  510. *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
  511. cz_send_msg_to_smc_with_parameter(smumgr,
  512. PPSMC_MSG_SetClkTableAddrHi,
  513. cz_smu->scratch_buffer[i].mc_addr_high);
  514. cz_send_msg_to_smc_with_parameter(smumgr,
  515. PPSMC_MSG_SetClkTableAddrLo,
  516. cz_smu->scratch_buffer[i].mc_addr_low);
  517. cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
  518. cz_smu->toc_entry_clock_table);
  519. cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
  520. return 0;
  521. }
  522. static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
  523. {
  524. struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
  525. unsigned long i;
  526. for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
  527. if (cz_smu->scratch_buffer[i].firmware_ID
  528. == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
  529. break;
  530. }
  531. cz_send_msg_to_smc_with_parameter(smumgr,
  532. PPSMC_MSG_SetClkTableAddrHi,
  533. cz_smu->scratch_buffer[i].mc_addr_high);
  534. cz_send_msg_to_smc_with_parameter(smumgr,
  535. PPSMC_MSG_SetClkTableAddrLo,
  536. cz_smu->scratch_buffer[i].mc_addr_low);
  537. cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
  538. cz_smu->toc_entry_clock_table);
  539. cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
  540. return 0;
  541. }
/*
 * Rebuild the firmware TOC, hand its address to the SMU, and execute the
 * save/profiling/initialize job chains in order.  Returns the result of the
 * final ExecuteJob message; intermediate message failures are not checked.
 */
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
	uint32_t smc_address;

	if (!smumgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	cz_smu_populate_firmware_entries(smumgr);
	cz_smu_construct_toc(smumgr);

	/* Clear UcodeLoadStatus so cz_check_fw_load_finish() later sees a
	 * fresh result rather than stale bits from a previous load. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
	cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);

	/* Tell the SMU where the TOC lives... */
	cz_send_msg_to_smc_with_parameter(smumgr,
				PPSMC_MSG_DriverDramAddrHi,
				cz_smu->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(smumgr,
				PPSMC_MSG_DriverDramAddrLo,
				cz_smu->toc_buffer.mc_addr_low);
	/* ...then run: job init, ARAM save, power profiling, bootup load. */
	cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
	cz_send_msg_to_smc_with_parameter(smumgr,
				PPSMC_MSG_ExecuteJob,
				cz_smu->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
				cz_smu->toc_entry_power_profiling_index);

	return cz_send_msg_to_smc_with_parameter(smumgr,
				PPSMC_MSG_ExecuteJob,
				cz_smu->toc_entry_initialize_index);
}
  571. static int cz_start_smu(struct pp_smumgr *smumgr)
  572. {
  573. int ret = 0;
  574. uint32_t fw_to_check = 0;
  575. fw_to_check = UCODE_ID_RLC_G_MASK |
  576. UCODE_ID_SDMA0_MASK |
  577. UCODE_ID_SDMA1_MASK |
  578. UCODE_ID_CP_CE_MASK |
  579. UCODE_ID_CP_ME_MASK |
  580. UCODE_ID_CP_PFP_MASK |
  581. UCODE_ID_CP_MEC_JT1_MASK |
  582. UCODE_ID_CP_MEC_JT2_MASK;
  583. if (smumgr->chip_id == CHIP_STONEY)
  584. fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
  585. ret = cz_request_smu_load_fw(smumgr);
  586. if (ret)
  587. pr_err("SMU firmware load failed\n");
  588. cz_check_fw_load_finish(smumgr, fw_to_check);
  589. ret = cz_load_mec_firmware(smumgr);
  590. if (ret)
  591. pr_err("Mec Firmware load failed\n");
  592. return ret;
  593. }
  594. static int cz_smu_init(struct pp_smumgr *smumgr)
  595. {
  596. uint64_t mc_addr = 0;
  597. int ret = 0;
  598. struct cz_smumgr *cz_smu;
  599. cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
  600. if (cz_smu == NULL)
  601. return -ENOMEM;
  602. smumgr->backend = cz_smu;
  603. cz_smu->toc_buffer.data_size = 4096;
  604. cz_smu->smu_buffer.data_size =
  605. ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
  606. ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
  607. ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
  608. ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
  609. ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
  610. ret = smu_allocate_memory(smumgr->device,
  611. cz_smu->toc_buffer.data_size,
  612. CGS_GPU_MEM_TYPE__GART_CACHEABLE,
  613. PAGE_SIZE,
  614. &mc_addr,
  615. &cz_smu->toc_buffer.kaddr,
  616. &cz_smu->toc_buffer.handle);
  617. if (ret != 0)
  618. return -1;
  619. cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
  620. cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
  621. ret = smu_allocate_memory(smumgr->device,
  622. cz_smu->smu_buffer.data_size,
  623. CGS_GPU_MEM_TYPE__GART_CACHEABLE,
  624. PAGE_SIZE,
  625. &mc_addr,
  626. &cz_smu->smu_buffer.kaddr,
  627. &cz_smu->smu_buffer.handle);
  628. if (ret != 0)
  629. return -1;
  630. cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
  631. cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
  632. if (0 != cz_smu_populate_single_scratch_entry(smumgr,
  633. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
  634. UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
  635. &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
  636. pr_err("Error when Populate Firmware Entry.\n");
  637. return -1;
  638. }
  639. if (0 != cz_smu_populate_single_scratch_entry(smumgr,
  640. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
  641. UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
  642. &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
  643. pr_err("Error when Populate Firmware Entry.\n");
  644. return -1;
  645. }
  646. if (0 != cz_smu_populate_single_scratch_entry(smumgr,
  647. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
  648. UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
  649. &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
  650. pr_err("Error when Populate Firmware Entry.\n");
  651. return -1;
  652. }
  653. if (0 != cz_smu_populate_single_scratch_entry(smumgr,
  654. CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
  655. sizeof(struct SMU8_MultimediaPowerLogData),
  656. &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
  657. pr_err("Error when Populate Firmware Entry.\n");
  658. return -1;
  659. }
  660. if (0 != cz_smu_populate_single_scratch_entry(smumgr,
  661. CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
  662. sizeof(struct SMU8_Fusion_ClkTable),
  663. &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
  664. pr_err("Error when Populate Firmware Entry.\n");
  665. return -1;
  666. }
  667. return 0;
  668. }
  669. static int cz_smu_fini(struct pp_smumgr *smumgr)
  670. {
  671. struct cz_smumgr *cz_smu;
  672. if (smumgr == NULL || smumgr->device == NULL)
  673. return -EINVAL;
  674. cz_smu = (struct cz_smumgr *)smumgr->backend;
  675. if (cz_smu) {
  676. cgs_free_gpu_mem(smumgr->device,
  677. cz_smu->toc_buffer.handle);
  678. cgs_free_gpu_mem(smumgr->device,
  679. cz_smu->smu_buffer.handle);
  680. kfree(cz_smu);
  681. }
  682. return 0;
  683. }
/* SMU manager callbacks for Carrizo-family (CZ/Stoney) parts. */
const struct pp_smumgr_func cz_smu_funcs = {
	.smu_init = cz_smu_init,
	.smu_fini = cz_smu_fini,
	.start_smu = cz_start_smu,
	.check_fw_load_finish = cz_check_fw_load_finish,
	/* Firmware loading happens inside start_smu for this family. */
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = cz_smum_get_argument,
	.send_msg_to_smc = cz_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
	.download_pptable_settings = cz_download_pptable_settings,
	.upload_pptable_settings = cz_upload_pptable_settings,
};