amdgpu_ucode.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
        DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
        DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
        DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
        DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
        DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
        DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
        DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
        DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
        DRM_DEBUG("ucode_array_offset_bytes: %u\n",
                  le32_to_cpu(hdr->ucode_array_offset_bytes));
        DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}
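
/*
 * Each amdgpu_ucode_print_*_hdr() helper below dispatches on
 * header_version_major (and, where needed, _minor) to the matching
 * header struct and dumps its fields; unrecognized versions are
 * reported with DRM_ERROR.
 */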
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("MC\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct mc_firmware_header_v1_0 *mc_hdr =
                        container_of(hdr, struct mc_firmware_header_v1_0, header);

                DRM_DEBUG("io_debug_size_bytes: %u\n",
                          le32_to_cpu(mc_hdr->io_debug_size_bytes));
                DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
                          le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
        } else {
                DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
        }
}
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("SMC\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct smc_firmware_header_v1_0 *smc_hdr =
                        container_of(hdr, struct smc_firmware_header_v1_0, header);

                DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
        } else {
                DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
        }
}
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("GFX\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct gfx_firmware_header_v1_0 *gfx_hdr =
                        container_of(hdr, struct gfx_firmware_header_v1_0, header);

                DRM_DEBUG("ucode_feature_version: %u\n",
                          le32_to_cpu(gfx_hdr->ucode_feature_version));
                DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
                DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
        } else {
                DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
        }
}
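
/*
 * RLC firmware comes in three known header layouts: v1.0, v2.0 and
 * v2.1, where v2.1 embeds v2.0 and appends the save/restore list
 * CNTL/GPM/SRM fields printed below.
 */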
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("RLC\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct rlc_firmware_header_v1_0 *rlc_hdr =
                        container_of(hdr, struct rlc_firmware_header_v1_0, header);

                DRM_DEBUG("ucode_feature_version: %u\n",
                          le32_to_cpu(rlc_hdr->ucode_feature_version));
                DRM_DEBUG("save_and_restore_offset: %u\n",
                          le32_to_cpu(rlc_hdr->save_and_restore_offset));
                DRM_DEBUG("clear_state_descriptor_offset: %u\n",
                          le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
                DRM_DEBUG("avail_scratch_ram_locations: %u\n",
                          le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
                DRM_DEBUG("master_pkt_description_offset: %u\n",
                          le32_to_cpu(rlc_hdr->master_pkt_description_offset));
        } else if (version_major == 2) {
                const struct rlc_firmware_header_v2_0 *rlc_hdr =
                        container_of(hdr, struct rlc_firmware_header_v2_0, header);

                DRM_DEBUG("ucode_feature_version: %u\n",
                          le32_to_cpu(rlc_hdr->ucode_feature_version));
                DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
                DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
                DRM_DEBUG("save_and_restore_offset: %u\n",
                          le32_to_cpu(rlc_hdr->save_and_restore_offset));
                DRM_DEBUG("clear_state_descriptor_offset: %u\n",
                          le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
                DRM_DEBUG("avail_scratch_ram_locations: %u\n",
                          le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
                DRM_DEBUG("reg_restore_list_size: %u\n",
                          le32_to_cpu(rlc_hdr->reg_restore_list_size));
                DRM_DEBUG("reg_list_format_start: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_start));
                DRM_DEBUG("reg_list_format_separate_start: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
                DRM_DEBUG("starting_offsets_start: %u\n",
                          le32_to_cpu(rlc_hdr->starting_offsets_start));
                DRM_DEBUG("reg_list_format_size_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
                DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
                DRM_DEBUG("reg_list_size_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_size_bytes));
                DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
                DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
                DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
                DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
                DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
                          le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
                if (version_minor == 1) {
                        const struct rlc_firmware_header_v2_1 *v2_1 =
                                container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
                        DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
                                  le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
                        DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
                        DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
                        DRM_DEBUG("save_restore_list_cntl_size_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
                        DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
                        DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
                        DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
                        DRM_DEBUG("save_restore_list_gpm_size_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
                        DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
                        DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
                        DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
                        DRM_DEBUG("save_restore_list_srm_size_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
                        DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
                                  le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
                }
        } else {
                DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
        }
}
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("SDMA\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct sdma_firmware_header_v1_0 *sdma_hdr =
                        container_of(hdr, struct sdma_firmware_header_v1_0, header);

                DRM_DEBUG("ucode_feature_version: %u\n",
                          le32_to_cpu(sdma_hdr->ucode_feature_version));
                DRM_DEBUG("ucode_change_version: %u\n",
                          le32_to_cpu(sdma_hdr->ucode_change_version));
                DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
                DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
                if (version_minor >= 1) {
                        const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
                                container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
                        DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
                }
        } else {
                DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
                          version_major, version_minor);
        }
}
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
{
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
        uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

        DRM_DEBUG("GPU_INFO\n");
        amdgpu_ucode_print_common_hdr(hdr);

        if (version_major == 1) {
                const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
                        container_of(hdr, struct gpu_info_firmware_header_v1_0, header);

                DRM_DEBUG("version_major: %u\n",
                          le16_to_cpu(gpu_info_hdr->version_major));
                DRM_DEBUG("version_minor: %u\n",
                          le16_to_cpu(gpu_info_hdr->version_minor));
        } else {
                DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
        }
}
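
/*
 * A loaded firmware image is considered valid when the size recorded
 * in its common header matches the size of the blob that
 * request_firmware() actually delivered. Returns 0 on success,
 * -EINVAL on mismatch.
 */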
int amdgpu_ucode_validate(const struct firmware *fw)
{
        const struct common_firmware_header *hdr =
                (const struct common_firmware_header *)fw->data;

        if (fw->size == le32_to_cpu(hdr->size_bytes))
                return 0;

        return -EINVAL;
}
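
/*
 * Note the inverted sense: returns false when the header matches the
 * given major.minor pair exactly, and true on any mismatch.
 */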
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
                              uint16_t hdr_major, uint16_t hdr_minor)
{
        if ((hdr->common.header_version_major == hdr_major) &&
            (hdr->common.header_version_minor == hdr_minor))
                return false;

        return true;
}
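
/*
 * Select how microcode reaches the hardware for a given ASIC: SI and
 * CIK parts are always driver-loaded (DIRECT), VI-era parts go through
 * the SMU, and Vega/Raven use the PSP unless the caller passed
 * load_type == 0 to force direct loading.
 */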
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
{
        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
        case CHIP_HAINAN:
                return AMDGPU_FW_LOAD_DIRECT;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_HAWAII:
        case CHIP_MULLINS:
                return AMDGPU_FW_LOAD_DIRECT;
#endif
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
                return AMDGPU_FW_LOAD_SMU;
        case CHIP_VEGA10:
        case CHIP_RAVEN:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                if (!load_type)
                        return AMDGPU_FW_LOAD_DIRECT;
                else
                        return AMDGPU_FW_LOAD_PSP;
        default:
                DRM_ERROR("Unknown firmware load type\n");
        }

        return AMDGPU_FW_LOAD_DIRECT;
}
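
/*
 * Copy one firmware image into its slot in the shared firmware buffer.
 * Most images are copied whole; under PSP loading the CP MEC images,
 * their jump tables, the RLC save/restore lists and the DMCU
 * ERAM/interrupt-vector regions are carved out and copied separately.
 */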
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
                                       struct amdgpu_firmware_info *ucode,
                                       uint64_t mc_addr, void *kptr)
{
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
        const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;

        if (NULL == ucode->fw)
                return 0;

        ucode->mc_addr = mc_addr;
        ucode->kaddr = kptr;

        if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
                return 0;

        header = (const struct common_firmware_header *)ucode->fw->data;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
        dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
            (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
             ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
             ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
             ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
             ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
             ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
             ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
             ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
             ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
                ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);

                memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
                                              le32_to_cpu(header->ucode_array_offset_bytes)),
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
                   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
                ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
                        le32_to_cpu(cp_hdr->jt_size) * 4;

                memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
                                              le32_to_cpu(header->ucode_array_offset_bytes)),
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
                   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
                ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;

                memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
                                              le32_to_cpu(header->ucode_array_offset_bytes) +
                                              le32_to_cpu(cp_hdr->jt_offset) * 4),
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
                ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
                        le32_to_cpu(dmcu_hdr->intv_size_bytes);

                memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
                                              le32_to_cpu(header->ucode_array_offset_bytes)),
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
                ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);

                memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
                                              le32_to_cpu(header->ucode_array_offset_bytes) +
                                              le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
                ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
                memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
                ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
                memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
                       ucode->ucode_size);
        } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
                ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
                memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
                       ucode->ucode_size);
        }

        return 0;
}
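
/*
 * Append the CP jump table behind a previously copied MEC image: the
 * table is written at the page-aligned end of the ucode within the
 * same firmware buffer slot.
 */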
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
                                 uint64_t mc_addr, void *kptr)
{
        const struct gfx_firmware_header_v1_0 *header = NULL;
        const struct common_firmware_header *comm_hdr = NULL;
        uint8_t *src_addr = NULL;
        uint8_t *dst_addr = NULL;

        if (NULL == ucode->fw)
                return 0;

        comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
        header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
        dst_addr = ucode->kaddr +
                   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
                         PAGE_SIZE);
        src_addr = (uint8_t *)ucode->fw->data +
                   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
                   (le32_to_cpu(header->jt_offset) * 4);
        memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);

        return 0;
}
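
/*
 * Allocate the single buffer object that backs all firmware images.
 * SR-IOV virtual functions place it in VRAM and zero it; bare metal
 * keeps it in GTT.
 */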
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
                amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
                        amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                        &adev->firmware.fw_buf,
                        &adev->firmware.fw_buf_mc,
                        &adev->firmware.fw_buf_ptr);
                if (!adev->firmware.fw_buf) {
                        dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
                        return -ENOMEM;
                } else if (amdgpu_sriov_vf(adev)) {
                        memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
                }
        }
        return 0;
}
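
/* Release the buffer object allocated by amdgpu_ucode_create_bo(). */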
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
                amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
                                      &adev->firmware.fw_buf_mc,
                                      &adev->firmware.fw_buf_ptr);
}
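
/*
 * Lay the loaded firmware images out back to back (page-aligned) in
 * the firmware buffer object. For non-PSP loading, the CP MEC1 jump
 * table additionally gets its own page-aligned slot right after the
 * MEC1 image.
 */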
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
        uint64_t fw_offset = 0;
        int i;
        struct amdgpu_firmware_info *ucode = NULL;

        /* On bare metal the ucode BO lives in GTT, so its contents survive
         * reset/suspend and the BO does not need to be refilled. */
        if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
                return 0;

        /*
         * If the SMU loads the firmware, the SMC, UVD and VCE images do
         * not need entries here.
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (amdgpu_sriov_vf(adev))
                        adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
                else
                        adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
        } else {
                adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
        }

        for (i = 0; i < adev->firmware.max_ucodes; i++) {
                ucode = &adev->firmware.ucode[i];
                if (ucode->fw) {
                        amdgpu_ucode_init_single_fw(adev, ucode,
                                                    adev->firmware.fw_buf_mc + fw_offset,
                                                    adev->firmware.fw_buf_ptr + fw_offset);
                        if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
                            adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                                const struct gfx_firmware_header_v1_0 *cp_hdr;

                                cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
                                amdgpu_ucode_patch_jt(ucode,
                                                      adev->firmware.fw_buf_mc + fw_offset,
                                                      adev->firmware.fw_buf_ptr + fw_offset);
                                fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
                        }
                        fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
                }
        }
        return 0;
}