/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "cz_ppsmc.h"
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
#include "cz_dpm.h"
#include "vi_dpm.h"

#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
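
/*
 * Read the argument/response payload of the most recent SMU message from
 * the SRBM2P mailbox argument register.
 */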
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}

static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv =
			(struct cz_smu_private_data *)(adev->smu.priv);

	return priv;
}
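
/*
 * Post a message to the SMU without waiting for the result: wait until the
 * response register reports that the previous message has been handled,
 * clear it, then write the new message ID into the mailbox.
 */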
static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == adev->usec_timeout)
		return -EINVAL;

	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}
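
/*
 * Synchronous variant: post the message, wait for a response, and check
 * that the SMU returned PPSMC_Result_OK.
 */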
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp = 0;

	if (cz_send_msg_to_smc_async(adev, msg))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == adev->usec_timeout)
		return -EINVAL;

	if (PPSMC_Result_OK != tmp) {
		dev_err(adev->dev, "SMC Failed to send Message.\n");
		return -EINVAL;
	}

	return 0;
}
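
/*
 * The SRBM2P mailbox carries only a 16-bit message ID, so any payload goes
 * through the 32-bit argument register, written before the message itself.
 * A typical call site splits a 64-bit MC address across two messages, e.g.:
 *
 *	cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DriverDramAddrHi,
 *					  upper_32_bits(mc_addr));
 *	cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DriverDramAddrLo,
 *					  lower_32_bits(mc_addr));
 */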
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}
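
/*
 * SMU SRAM is reached indirectly through the MP0PUB index/data register
 * pair; the address must be dword aligned and the whole dword must lie
 * below the given limit.
 */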
static int cz_set_smc_sram_address(struct amdgpu_device *adev,
						u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}

int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
						u32 *value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(mmMP0PUB_IND_DATA_0);

	return 0;
}

static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
						u32 value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	WREG32(mmMP0PUB_IND_DATA_0, value);

	return 0;
}
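
/*
 * Hand the TOC to the SMU and kick off the firmware load: clear the
 * UcodeLoadStatus word in the firmware header, point the SMU at the TOC
 * buffer, then execute the ARAM-save, power-profiling and initialize jobs.
 */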
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/* prepare toc buffers */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/* execute jobs */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}

/*
 * Check if the FW has been loaded; the SMU will not return until loading
 * has finished.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
						uint32_t fw_mask)
{
	int i;
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
			"SMU firmware load check failed, expected 0x%x, got 0x%x\n",
			fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}

/*
 * Interface for the other IP blocks to check firmware loading status:
 * returns 0 for success, non-zero otherwise.
 */
static int cz_smu_check_finished(struct amdgpu_device *adev,
				enum AMDGPU_UCODE_ID id)
{
	switch (id) {
	case AMDGPU_UCODE_ID_SDMA0:
		if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		break;
	}

	return 1;
}
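
/*
 * Halt both MEC micro-engines and program the compute instruction cache
 * base registers with the MC address of the MEC1 ucode, so the MEC fetches
 * its firmware directly once it is taken back out of halt.
 */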
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
				&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
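
/*
 * Start the SMU-managed firmware load: request the load, poll until every
 * expected ucode bit appears in UcodeLoadStatus, load the MEC by hand, and
 * record which ucodes are resident.  Stoney has a single SDMA engine and a
 * single MEC jump table, so the second instance of each is masked off.
 */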
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED |
					AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}
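
/*
 * Map an SMU ucode ID onto the driver's AMDGPU_UCODE_ID enum; both MEC
 * jump tables are looked up in the MEC1 ucode image.
 */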
static uint32_t cz_convert_fw_type(uint32_t fw_type)
{
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("UCode type is out of range!");
	}

	return result;
}
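
/*
 * Translate a driver-side scratch entry enum into the task argument the
 * SMU expects: a ucode ID for firmware images, or a task-argument type for
 * register-restore and clock-table entries.
 */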
static uint8_t cz_smu_translate_firmware_enum_to_arg(
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
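
/*
 * Describe one ucode image in a TOC buffer entry.  For the MEC jump
 * tables, only the jump-table slice of the MEC image is described, using
 * the offset and size taken from the firmware header.
 */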
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
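
/*
 * Carve a scratch area of size_in_byte out of the SMU-internal buffer and
 * describe it in a TOC buffer entry.
 */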
static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry scratch_type,
					uint32_t size_in_byte,
					struct cz_buffer_entry *entry)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
						priv->smu_buffer.mc_addr_low;

	mc_addr += size_in_byte;

	priv->smu_buffer_used_bytes += size_in_byte;
	entry->data_size = size_in_byte;
	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
	entry->mc_addr_low = lower_32_bits(mc_addr);
	entry->mc_addr_high = upper_32_bits(mc_addr);
	entry->firmware_ID = scratch_type;

	return 0;
}
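
/*
 * Append a TASK_TYPE_UCODE_LOAD task to the TOC, pointing at the firmware
 * entry previously registered for firmware_enum; is_last terminates the
 * task list.
 */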
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}
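
/*
 * Append a save/load/initialize task for a scratch buffer to the TOC.  For
 * the IH-register entry, the restore metadata command is also written into
 * the scratch buffer itself.
 */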
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;

		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
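
/*
 * The construct_toc helpers below build the individual job lists in the
 * TOC: RLC ARAM save, VDDGFX entry/exit, power profiling, boot-up ucode
 * load and clock-table transfer.  Each records where its first task sits
 * so the job can later be started with PPSMC_MSG_ExecuteJob.
 */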
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_LOAD, true);

	return 0;
}

static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}

static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_clock_table = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
	int i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}

/*
 * cz smu uninitialization
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
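
/*
 * Have the SMU copy its current clock table into the clock-table scratch
 * buffer and return a pointer to it; cz_smu_upload_pptable() below pushes
 * the (possibly modified) table back to the SMU.
 */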
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}

int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}

/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

/*
 * cz smu initialization
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	priv->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	 * 1. create amdgpu_bo for toc buffer and smu buffer
	 * 2. pin mc address
	 * 3. map kernel virtual address
	 */
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       smu_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			sizeof(struct SMU8_MultimediaPowerLogData),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			sizeof(struct SMU8_Fusion_ClkTable),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);

	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}