/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "cz_ppsmc.h"
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
#include "cz_dpm.h"
#include "vi_dpm.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
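
/* Read back the 32-bit argument/result of the most recent SMU message */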
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}

static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv =
			(struct cz_smu_private_data *)(adev->smu.priv);

	return priv;
}
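
/*
 * Post a message to the SMU without waiting for its response: wait for
 * any outstanding response to arrive, clear the response register, then
 * write the new message.
 */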
static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means the SMU never acknowledged the previous message */
	if (i == adev->usec_timeout)
		return -EINVAL;

	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}
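
/* Post a message to the SMU and wait for (and check) its response */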
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp = 0;

	if (cz_send_msg_to_smc_async(adev, msg))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means the SMU never posted a response */
	if (i == adev->usec_timeout)
		return -EINVAL;

	if (PPSMC_Result_OK != tmp) {
		dev_err(adev->dev, "SMC failed to send message\n");
		return -EINVAL;
	}

	return 0;
}

int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc_async(adev, msg);
}

int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}
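
/*
 * Select the SMC SRAM dword to be accessed through the indirect data
 * register; the address must be dword-aligned and within @limit.
 */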
static int cz_set_smc_sram_address(struct amdgpu_device *adev,
						u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}

int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
						u32 *value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(mmMP0PUB_IND_DATA_0);

	return 0;
}

static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
						u32 value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	WREG32(mmMP0PUB_IND_DATA_0, value);

	return 0;
}
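
/*
 * Hand the TOC buffer address to the SMU and kick off the firmware-load
 * jobs; UcodeLoadStatus is cleared first so completion can be polled.
 */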
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/* prepare toc buffers */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/* execute jobs */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}

/*
 * Check whether the firmware has been loaded; the SMU only sets the
 * corresponding bits in UcodeLoadStatus once loading has finished.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
						uint32_t fw_mask)
{
	int i;
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
			"SMU firmware load check failed: expected 0x%x, got 0x%x\n",
			fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}

/*
 * Interface for the IP blocks to check firmware loading status:
 * returns 0 if the firmware is loaded, nonzero otherwise.
 */
static int cz_smu_check_finished(struct amdgpu_device *adev,
				enum AMDGPU_UCODE_ID id)
{
	switch (id) {
	case AMDGPU_UCODE_ID_SDMA0:
		if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		break;
	}

	return 1;
}
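
/*
 * Halt the MEC pipes and point the CPC instruction cache at the MEC
 * ucode image in GTT.
 */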
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
				&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
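
/*
 * Kick off SMU-assisted firmware loading and poll until all requested
 * ucodes report loaded; Stoney carries no SDMA1/MEC2, so those masks
 * are dropped there.
 */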
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) MEC firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}
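
/* Map an SMU ucode id (UCODE_ID_*) to the driver's AMDGPU_UCODE_ID_* enum */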
static uint32_t cz_convert_fw_type(uint32_t fw_type)
{
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("UCode type is out of range!");
	}

	return result;
}

static uint8_t cz_smu_translate_firmware_enum_to_arg(
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
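
/*
 * Fill a driver-buffer entry with the GPU address and size of one ucode
 * image; the MEC jump tables (JT1/JT2) are sub-ranges of the MEC image.
 */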
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
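
/*
 * Carve a @size_in_byte scratch region out of the shared SMU buffer and
 * record its GPU and kernel addresses in @entry.
 */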
static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry scratch_type,
					uint32_t size_in_byte,
					struct cz_buffer_entry *entry)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
						priv->smu_buffer.mc_addr_low;

	/* the new entry starts at the current end of the buffer */
	mc_addr += priv->smu_buffer_used_bytes;

	entry->data_size = size_in_byte;
	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
	entry->mc_addr_low = lower_32_bits(mc_addr);
	entry->mc_addr_high = upper_32_bits(mc_addr);
	entry->firmware_ID = scratch_type;

	priv->smu_buffer_used_bytes += size_in_byte;

	return 0;
}
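
/*
 * Append a UCODE_LOAD task for @firmware_enum to the TOC; tasks chain
 * through their 'next' index and @is_last terminates the list.
 */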
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}

static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
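
/*
 * The cz_smu_construct_toc_for_*() helpers below build the job lists
 * the SMU executes: RLC ARAM save, vddgfx enter/exit, power profiling,
 * boot-up ucode loading and clock-table transfer.
 */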
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			/* Stoney has a single MEC, so JT1 stands in for JT2 */
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}

static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}

static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}

static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_clock_table = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
	int i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}

/*
 * cz smu uninitialization
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
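
/*
 * Copy the clock table from SMU SRAM into the fusion clock-table
 * scratch buffer and return a kernel pointer to it in @table.
 */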
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
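
/* Write the (possibly modified) clock table back to SMU SRAM */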
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}

/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

/*
 * cz smu initialization
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	priv->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	 * 1. create amdgpu_bo for toc buffer and smu buffer
	 * 2. pin mc address
	 * 3. map kernel virtual address
	 */
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       smu_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			sizeof(struct SMU8_MultimediaPowerLogData),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			sizeof(struct SMU8_Fusion_ClkTable),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);

	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}