cz_smc.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962
  1. /*
  2. * Copyright 2014 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/firmware.h>
  24. #include "drmP.h"
  25. #include "amdgpu.h"
  26. #include "smu8.h"
  27. #include "smu8_fusion.h"
  28. #include "cz_ppsmc.h"
  29. #include "cz_smumgr.h"
  30. #include "smu_ucode_xfer_cz.h"
  31. #include "amdgpu_ucode.h"
  32. #include "smu/smu_8_0_d.h"
  33. #include "smu/smu_8_0_sh_mask.h"
  34. #include "gca/gfx_8_0_d.h"
  35. #include "gca/gfx_8_0_sh_mask.h"
  36. uint32_t cz_get_argument(struct amdgpu_device *adev)
  37. {
  38. return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
  39. }
  40. static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
  41. {
  42. struct cz_smu_private_data *priv =
  43. (struct cz_smu_private_data *)(adev->smu.priv);
  44. return priv;
  45. }
  46. int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
  47. {
  48. int i;
  49. u32 content = 0, tmp;
  50. for (i = 0; i < adev->usec_timeout; i++) {
  51. tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
  52. SMU_MP1_SRBM2P_RESP_0, CONTENT);
  53. if (content != tmp)
  54. break;
  55. udelay(1);
  56. }
  57. /* timeout means wrong logic*/
  58. if (i == adev->usec_timeout)
  59. return -EINVAL;
  60. WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
  61. WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
  62. return 0;
  63. }
  64. int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
  65. {
  66. int i;
  67. u32 content = 0, tmp = 0;
  68. if (cz_send_msg_to_smc_async(adev, msg))
  69. return -EINVAL;
  70. for (i = 0; i < adev->usec_timeout; i++) {
  71. tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
  72. SMU_MP1_SRBM2P_RESP_0, CONTENT);
  73. if (content != tmp)
  74. break;
  75. udelay(1);
  76. }
  77. /* timeout means wrong logic*/
  78. if (i == adev->usec_timeout)
  79. return -EINVAL;
  80. if (PPSMC_Result_OK != tmp) {
  81. dev_err(adev->dev, "SMC Failed to send Message.\n");
  82. return -EINVAL;
  83. }
  84. return 0;
  85. }
  86. int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
  87. u16 msg, u32 parameter)
  88. {
  89. WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
  90. return cz_send_msg_to_smc_async(adev, msg);
  91. }
  92. int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
  93. u16 msg, u32 parameter)
  94. {
  95. WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
  96. return cz_send_msg_to_smc(adev, msg);
  97. }
  98. static int cz_set_smc_sram_address(struct amdgpu_device *adev,
  99. u32 smc_address, u32 limit)
  100. {
  101. if (smc_address & 3)
  102. return -EINVAL;
  103. if ((smc_address + 3) > limit)
  104. return -EINVAL;
  105. WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
  106. return 0;
  107. }
  108. int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
  109. u32 *value, u32 limit)
  110. {
  111. int ret;
  112. ret = cz_set_smc_sram_address(adev, smc_address, limit);
  113. if (ret)
  114. return ret;
  115. *value = RREG32(mmMP0PUB_IND_DATA_0);
  116. return 0;
  117. }
  118. int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
  119. u32 value, u32 limit)
  120. {
  121. int ret;
  122. ret = cz_set_smc_sram_address(adev, smc_address, limit);
  123. if (ret)
  124. return ret;
  125. WREG32(mmMP0PUB_IND_DATA_0, value);
  126. return 0;
  127. }
/*
 * Hand the TOC buffer to the SMU and kick off the firmware load jobs.
 * Always returns 0; completion is verified separately by
 * cz_smu_check_fw_load_finish().
 */
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* clear UcodeLoadStatus so the later poll starts from a known state */
	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/*prepare toc buffers*/
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_DriverDramAddrHi,
					  priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_DriverDramAddrLo,
					  priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/*execute jobs*/
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_power_profiling_index);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_initialize_index);

	return 0;
}
  154. /*
  155. *Check if the FW has been loaded, SMU will not return if loading
  156. *has not finished.
  157. */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
				       uint32_t fw_mask)
{
	int i;
	/* SRAM address of UcodeLoadStatus in the SMU-side firmware header */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* set the indirect window once; DATA reads below all hit it */
	WREG32(mmMP0PUB_IND_INDEX, index);

	/* poll until every bit in fw_mask is reported loaded, or timeout */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
			"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
			fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}
  179. /*
  180. * interfaces for different ip blocks to check firmware loading status
  181. * 0 for success otherwise failed
  182. */
  183. static int cz_smu_check_finished(struct amdgpu_device *adev,
  184. enum AMDGPU_UCODE_ID id)
  185. {
  186. switch (id) {
  187. case AMDGPU_UCODE_ID_SDMA0:
  188. if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
  189. return 0;
  190. break;
  191. case AMDGPU_UCODE_ID_SDMA1:
  192. if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
  193. return 0;
  194. break;
  195. case AMDGPU_UCODE_ID_CP_CE:
  196. if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
  197. return 0;
  198. break;
  199. case AMDGPU_UCODE_ID_CP_PFP:
  200. if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
  201. return 0;
  202. case AMDGPU_UCODE_ID_CP_ME:
  203. if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
  204. return 0;
  205. break;
  206. case AMDGPU_UCODE_ID_CP_MEC1:
  207. if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
  208. return 0;
  209. break;
  210. case AMDGPU_UCODE_ID_CP_MEC2:
  211. if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
  212. return 0;
  213. break;
  214. case AMDGPU_UCODE_ID_RLC_G:
  215. if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
  216. return 0;
  217. break;
  218. case AMDGPU_UCODE_ID_MAXIMUM:
  219. default:
  220. break;
  221. }
  222. return 1;
  223. }
/*
 * Manually point the CPC instruction cache at the MEC1 firmware image.
 * Returns -EINVAL if the MEC firmware was never fetched.
 */
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* configure the instruction-cache base control before the address */
	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	/* program the MC address of the firmware image, masked to the
	 * register's base-address field */
	reg_data = lower_32_bits(ucode->mc_addr) &
		   REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
		   REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
/*
 * Drive the SMU through the firmware load sequence, wait for the load
 * status bits to come up, and record the loaded firmwares in
 * adev->smu.fw_flags.  On Carrizo the MEC image is additionally
 * programmed by the driver itself.
 * Returns 0 on success, negative errno on failure.
 */
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;
	/* firmware bits the SMU must report as loaded */
	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
			       UCODE_ID_SDMA0_MASK |
			       UCODE_ID_SDMA1_MASK |
			       UCODE_ID_CP_CE_MASK |
			       UCODE_ID_CP_ME_MASK |
			       UCODE_ID_CP_PFP_MASK |
			       UCODE_ID_CP_MEC_JT1_MASK |
			       UCODE_ID_CP_MEC_JT2_MASK;

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
			     AMDGPU_SDMA1_UCODE_LOADED |
			     AMDGPU_CPCE_UCODE_LOADED |
			     AMDGPU_CPPFP_UCODE_LOADED |
			     AMDGPU_CPME_UCODE_LOADED |
			     AMDGPU_CPMEC1_UCODE_LOADED |
			     AMDGPU_CPMEC2_UCODE_LOADED |
			     AMDGPU_CPRLC_UCODE_LOADED;

	return ret;
}
  285. static uint32_t cz_convert_fw_type(uint32_t fw_type)
  286. {
  287. enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
  288. switch (fw_type) {
  289. case UCODE_ID_SDMA0:
  290. result = AMDGPU_UCODE_ID_SDMA0;
  291. break;
  292. case UCODE_ID_SDMA1:
  293. result = AMDGPU_UCODE_ID_SDMA1;
  294. break;
  295. case UCODE_ID_CP_CE:
  296. result = AMDGPU_UCODE_ID_CP_CE;
  297. break;
  298. case UCODE_ID_CP_PFP:
  299. result = AMDGPU_UCODE_ID_CP_PFP;
  300. break;
  301. case UCODE_ID_CP_ME:
  302. result = AMDGPU_UCODE_ID_CP_ME;
  303. break;
  304. case UCODE_ID_CP_MEC_JT1:
  305. case UCODE_ID_CP_MEC_JT2:
  306. result = AMDGPU_UCODE_ID_CP_MEC1;
  307. break;
  308. case UCODE_ID_RLC_G:
  309. result = AMDGPU_UCODE_ID_RLC_G;
  310. break;
  311. default:
  312. DRM_ERROR("UCode type is out of range!");
  313. }
  314. return result;
  315. }
  316. static uint8_t cz_smu_translate_firmware_enum_to_arg(
  317. enum cz_scratch_entry firmware_enum)
  318. {
  319. uint8_t ret = 0;
  320. switch (firmware_enum) {
  321. case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
  322. ret = UCODE_ID_SDMA0;
  323. break;
  324. case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
  325. ret = UCODE_ID_SDMA1;
  326. break;
  327. case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
  328. ret = UCODE_ID_CP_CE;
  329. break;
  330. case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
  331. ret = UCODE_ID_CP_PFP;
  332. break;
  333. case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
  334. ret = UCODE_ID_CP_ME;
  335. break;
  336. case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
  337. ret = UCODE_ID_CP_MEC_JT1;
  338. break;
  339. case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
  340. ret = UCODE_ID_CP_MEC_JT2;
  341. break;
  342. case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
  343. ret = UCODE_ID_GMCON_RENG;
  344. break;
  345. case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
  346. ret = UCODE_ID_RLC_G;
  347. break;
  348. case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
  349. ret = UCODE_ID_RLC_SCRATCH;
  350. break;
  351. case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
  352. ret = UCODE_ID_RLC_SRM_ARAM;
  353. break;
  354. case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
  355. ret = UCODE_ID_RLC_SRM_DRAM;
  356. break;
  357. case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
  358. ret = UCODE_ID_DMCU_ERAM;
  359. break;
  360. case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
  361. ret = UCODE_ID_DMCU_IRAM;
  362. break;
  363. case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
  364. ret = TASK_ARG_INIT_MM_PWR_LOG;
  365. break;
  366. case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
  367. case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
  368. case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
  369. case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
  370. case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
  371. case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
  372. ret = TASK_ARG_REG_MMIO;
  373. break;
  374. case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
  375. ret = TASK_ARG_INIT_CLK_TABLE;
  376. break;
  377. }
  378. return ret;
  379. }
/*
 * Describe one fetched firmware image in @entry (GPU address split into
 * hi/lo, size, and the scratch-entry ID the TOC tasks will reference).
 * Returns -EINVAL if the firmware was never fetched.
 */
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						 enum cz_scratch_entry firmware_enum,
						 struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* for the MEC jump tables, narrow the entry to the jt_offset/jt_size
	 * region inside the (shared) MEC image; offsets are in dwords */
	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
  406. static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
  407. enum cz_scratch_entry scratch_type,
  408. uint32_t size_in_byte,
  409. struct cz_buffer_entry *entry)
  410. {
  411. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  412. uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
  413. priv->smu_buffer.mc_addr_low;
  414. mc_addr += size_in_byte;
  415. priv->smu_buffer_used_bytes += size_in_byte;
  416. entry->data_size = size_in_byte;
  417. entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
  418. entry->mc_addr_low = lower_32_bits(mc_addr);
  419. entry->mc_addr_high = upper_32_bits(mc_addr);
  420. entry->firmware_ID = scratch_type;
  421. return 0;
  422. }
/*
 * Append a TASK_TYPE_UCODE_LOAD task for @firmware_enum to the TOC.
 * @is_last terminates the task chain of the current job.
 * NOTE(review): toc_entry_used_count advances even when the firmware
 * lookup fails, leaving a partially-filled task slot behind.
 */
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
						  enum cz_scratch_entry firmware_enum,
						  bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	/* chain to the next task slot unless this one ends the job */
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	/* find the driver buffer entry describing this firmware */
	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}
/*
 * Append a scratch-buffer task of @type (save/load/initialize) for
 * @firmware_enum to the TOC.  @is_last terminates the task chain.
 */
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
					       enum cz_scratch_entry firmware_enum,
					       uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	/* chain to the next task slot unless this one ends the job */
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	/* find the scratch entry reserved for this type */
	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	/* the IH register-restore blob carries a metadata command header */
	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
  475. static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
  476. {
  477. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  478. priv->toc_entry_aram = priv->toc_entry_used_count;
  479. cz_smu_populate_single_scratch_task(adev,
  480. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
  481. TASK_TYPE_UCODE_SAVE, true);
  482. return 0;
  483. }
  484. static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
  485. {
  486. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  487. struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
  488. toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
  489. cz_smu_populate_single_scratch_task(adev,
  490. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
  491. TASK_TYPE_UCODE_SAVE, false);
  492. cz_smu_populate_single_scratch_task(adev,
  493. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
  494. TASK_TYPE_UCODE_SAVE, true);
  495. return 0;
  496. }
/*
 * Build the JOB_GFX_RESTORE job: reload the CP/RLC ucodes (only when
 * the SMU performs firmware loading) and then restore the RLC
 * scratch/ARAM/DRAM save areas.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
  529. static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
  530. {
  531. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  532. priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
  533. cz_smu_populate_single_scratch_task(adev,
  534. CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
  535. TASK_TYPE_INITIALIZE, true);
  536. return 0;
  537. }
/*
 * Build the boot-time firmware load job (reached through
 * toc_entry_initialize_index).  Tasks are only emitted when the SMU
 * performs firmware loading for the driver.
 */
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		/* RLC_G is the last task in the boot job chain */
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}
  562. static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
  563. {
  564. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  565. priv->toc_entry_clock_table = priv->toc_entry_used_count;
  566. cz_smu_populate_single_scratch_task(adev,
  567. CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
  568. TASK_TYPE_INITIALIZE, true);
  569. return 0;
  570. }
  571. static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
  572. {
  573. int i;
  574. struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
  575. struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
  576. for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
  577. toc->JobList[i] = (uint8_t)IGNORE_JOB;
  578. return 0;
  579. }
  580. /*
  581. * cz smu uninitialization
  582. */
int cz_smu_fini(struct amdgpu_device *adev)
{
	/* release the TOC and SMU scratch BOs created in cz_smu_init() */
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	/* tear down the per-ucode BOs set up by amdgpu_ucode_init_bo() */
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);
	return 0;
}
/*
 * Ask the SMU to copy its clock table into the driver's scratch buffer
 * and return the CPU-visible pointer via @table.
 * Returns 0 on success, -EINVAL if no clktable scratch entry exists.
 */
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* locate the scratch entry reserved for the fusion clock table */
	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
		    CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrHi,
					  priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrLo,
					  priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
/*
 * Push the (possibly modified) clock table in the driver's scratch
 * buffer back up to the SMU.
 * Returns 0 on success, -EINVAL if no clktable scratch entry exists.
 */
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* locate the scratch entry reserved for the fusion clock table */
	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
		    CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrHi,
					  priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrLo,
					  priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}
  646. /*
  647. * cz smumgr functions initialization
  648. */
/* Only the fw-load-finished query is implemented here; the load itself
 * is driven through the TOC, so the request hooks stay NULL. */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
  654. /*
  655. * cz smu initialization
  656. */
  657. int cz_smu_init(struct amdgpu_device *adev)
  658. {
  659. int ret = -EINVAL;
  660. uint64_t mc_addr = 0;
  661. struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
  662. struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
  663. void *toc_buf_ptr = NULL;
  664. void *smu_buf_ptr = NULL;
  665. struct cz_smu_private_data *priv =
  666. kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
  667. if (priv == NULL)
  668. return -ENOMEM;
  669. /* allocate firmware buffers */
  670. if (adev->firmware.smu_load)
  671. amdgpu_ucode_init_bo(adev);
  672. adev->smu.priv = priv;
  673. adev->smu.fw_flags = 0;
  674. priv->toc_buffer.data_size = 4096;
  675. priv->smu_buffer.data_size =
  676. ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
  677. ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
  678. ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
  679. ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
  680. ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
  681. /* prepare toc buffer and smu buffer:
  682. * 1. create amdgpu_bo for toc buffer and smu buffer
  683. * 2. pin mc address
  684. * 3. map kernel virtual address
  685. */
  686. ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
  687. true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
  688. if (ret) {
  689. dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
  690. return ret;
  691. }
  692. ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
  693. true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
  694. if (ret) {
  695. dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
  696. return ret;
  697. }
  698. /* toc buffer reserve/pin/map */
  699. ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
  700. if (ret) {
  701. amdgpu_bo_unref(&adev->smu.toc_buf);
  702. dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
  703. return ret;
  704. }
  705. ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
  706. if (ret) {
  707. amdgpu_bo_unreserve(adev->smu.toc_buf);
  708. amdgpu_bo_unref(&adev->smu.toc_buf);
  709. dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
  710. return ret;
  711. }
  712. ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
  713. if (ret)
  714. goto smu_init_failed;
  715. amdgpu_bo_unreserve(adev->smu.toc_buf);
  716. priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
  717. priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
  718. priv->toc_buffer.kaddr = toc_buf_ptr;
  719. /* smu buffer reserve/pin/map */
  720. ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
  721. if (ret) {
  722. amdgpu_bo_unref(&adev->smu.smu_buf);
  723. dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
  724. return ret;
  725. }
  726. ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
  727. if (ret) {
  728. amdgpu_bo_unreserve(adev->smu.smu_buf);
  729. amdgpu_bo_unref(&adev->smu.smu_buf);
  730. dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
  731. return ret;
  732. }
  733. ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
  734. if (ret)
  735. goto smu_init_failed;
  736. amdgpu_bo_unreserve(adev->smu.smu_buf);
  737. priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
  738. priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
  739. priv->smu_buffer.kaddr = smu_buf_ptr;
  740. if (adev->firmware.smu_load) {
  741. if (cz_smu_populate_single_firmware_entry(adev,
  742. CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
  743. &priv->driver_buffer[priv->driver_buffer_length++]))
  744. goto smu_init_failed;
  745. if (cz_smu_populate_single_firmware_entry(adev,
  746. CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
  747. &priv->driver_buffer[priv->driver_buffer_length++]))
  748. goto smu_init_failed;
  749. if (cz_smu_populate_single_firmware_entry(adev,
  750. CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
  751. &priv->driver_buffer[priv->driver_buffer_length++]))
  752. goto smu_init_failed;
  753. if (cz_smu_populate_single_firmware_entry(adev,
  754. CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
  755. &priv->driver_buffer[priv->driver_buffer_length++]))
  756. goto smu_init_failed;
  757. if (cz_smu_populate_single_firmware_entry(adev,
  758. CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
  759. &priv->driver_buffer[priv->driver_buffer_length++]))
  760. goto smu_init_failed;
  761. if (cz_smu_populate_single_firmware_entry(adev,
  762. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
  763. &priv->driver_buffer[priv->driver_buffer_length++]))
  764. goto smu_init_failed;
  765. if (cz_smu_populate_single_firmware_entry(adev,
  766. CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
  767. &priv->driver_buffer[priv->driver_buffer_length++]))
  768. goto smu_init_failed;
  769. if (cz_smu_populate_single_firmware_entry(adev,
  770. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
  771. &priv->driver_buffer[priv->driver_buffer_length++]))
  772. goto smu_init_failed;
  773. }
  774. if (cz_smu_populate_single_scratch_entry(adev,
  775. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
  776. UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
  777. &priv->scratch_buffer[priv->scratch_buffer_length++]))
  778. goto smu_init_failed;
  779. if (cz_smu_populate_single_scratch_entry(adev,
  780. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
  781. UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
  782. &priv->scratch_buffer[priv->scratch_buffer_length++]))
  783. goto smu_init_failed;
  784. if (cz_smu_populate_single_scratch_entry(adev,
  785. CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
  786. UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
  787. &priv->scratch_buffer[priv->scratch_buffer_length++]))
  788. goto smu_init_failed;
  789. if (cz_smu_populate_single_scratch_entry(adev,
  790. CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
  791. sizeof(struct SMU8_MultimediaPowerLogData),
  792. &priv->scratch_buffer[priv->scratch_buffer_length++]))
  793. goto smu_init_failed;
  794. if (cz_smu_populate_single_scratch_entry(adev,
  795. CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
  796. sizeof(struct SMU8_Fusion_ClkTable),
  797. &priv->scratch_buffer[priv->scratch_buffer_length++]))
  798. goto smu_init_failed;
  799. cz_smu_initialize_toc_empty_job_list(adev);
  800. cz_smu_construct_toc_for_rlc_aram_save(adev);
  801. cz_smu_construct_toc_for_vddgfx_enter(adev);
  802. cz_smu_construct_toc_for_vddgfx_exit(adev);
  803. cz_smu_construct_toc_for_power_profiling(adev);
  804. cz_smu_construct_toc_for_bootup(adev);
  805. cz_smu_construct_toc_for_clock_table(adev);
  806. /* init the smumgr functions */
  807. adev->smu.smumgr_funcs = &cz_smumgr_funcs;
  808. return 0;
  809. smu_init_failed:
  810. amdgpu_bo_unref(toc_buf);
  811. amdgpu_bo_unref(smu_buf);
  812. return ret;
  813. }