amdgpu_cgs.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. *
  23. */
  24. #include <linux/list.h>
  25. #include <linux/slab.h>
  26. #include <linux/pci.h>
  27. #include <drm/drmP.h>
  28. #include <linux/firmware.h>
  29. #include <drm/amdgpu_drm.h>
  30. #include "amdgpu.h"
  31. #include "atom.h"
  32. #include "amdgpu_ucode.h"
/* Wrapper binding a CGS handle (the opaque base) to its backing amdgpu device. */
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

/* Recover the amdgpu_device from the cgs_device handle passed to every callback. */
#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
  40. static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
  41. {
  42. CGS_FUNC_ADEV;
  43. return RREG32(offset);
  44. }
  45. static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
  46. uint32_t value)
  47. {
  48. CGS_FUNC_ADEV;
  49. WREG32(offset, value);
  50. }
  51. static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
  52. enum cgs_ind_reg space,
  53. unsigned index)
  54. {
  55. CGS_FUNC_ADEV;
  56. switch (space) {
  57. case CGS_IND_REG__MMIO:
  58. return RREG32_IDX(index);
  59. case CGS_IND_REG__PCIE:
  60. return RREG32_PCIE(index);
  61. case CGS_IND_REG__SMC:
  62. return RREG32_SMC(index);
  63. case CGS_IND_REG__UVD_CTX:
  64. return RREG32_UVD_CTX(index);
  65. case CGS_IND_REG__DIDT:
  66. return RREG32_DIDT(index);
  67. case CGS_IND_REG_GC_CAC:
  68. return RREG32_GC_CAC(index);
  69. case CGS_IND_REG_SE_CAC:
  70. return RREG32_SE_CAC(index);
  71. case CGS_IND_REG__AUDIO_ENDPT:
  72. DRM_ERROR("audio endpt register access not implemented.\n");
  73. return 0;
  74. }
  75. WARN(1, "Invalid indirect register space");
  76. return 0;
  77. }
  78. static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
  79. enum cgs_ind_reg space,
  80. unsigned index, uint32_t value)
  81. {
  82. CGS_FUNC_ADEV;
  83. switch (space) {
  84. case CGS_IND_REG__MMIO:
  85. return WREG32_IDX(index, value);
  86. case CGS_IND_REG__PCIE:
  87. return WREG32_PCIE(index, value);
  88. case CGS_IND_REG__SMC:
  89. return WREG32_SMC(index, value);
  90. case CGS_IND_REG__UVD_CTX:
  91. return WREG32_UVD_CTX(index, value);
  92. case CGS_IND_REG__DIDT:
  93. return WREG32_DIDT(index, value);
  94. case CGS_IND_REG_GC_CAC:
  95. return WREG32_GC_CAC(index, value);
  96. case CGS_IND_REG_SE_CAC:
  97. return WREG32_SE_CAC(index, value);
  98. case CGS_IND_REG__AUDIO_ENDPT:
  99. DRM_ERROR("audio endpt register access not implemented.\n");
  100. return;
  101. }
  102. WARN(1, "Invalid indirect register space");
  103. }
  104. static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
  105. enum cgs_resource_type resource_type,
  106. uint64_t size,
  107. uint64_t offset,
  108. uint64_t *resource_base)
  109. {
  110. CGS_FUNC_ADEV;
  111. if (resource_base == NULL)
  112. return -EINVAL;
  113. switch (resource_type) {
  114. case CGS_RESOURCE_TYPE_MMIO:
  115. if (adev->rmmio_size == 0)
  116. return -ENOENT;
  117. if ((offset + size) > adev->rmmio_size)
  118. return -EINVAL;
  119. *resource_base = adev->rmmio_base;
  120. return 0;
  121. case CGS_RESOURCE_TYPE_DOORBELL:
  122. if (adev->doorbell.size == 0)
  123. return -ENOENT;
  124. if ((offset + size) > adev->doorbell.size)
  125. return -EINVAL;
  126. *resource_base = adev->doorbell.base;
  127. return 0;
  128. case CGS_RESOURCE_TYPE_FB:
  129. case CGS_RESOURCE_TYPE_IO:
  130. case CGS_RESOURCE_TYPE_ROM:
  131. default:
  132. return -EINVAL;
  133. }
  134. }
  135. static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
  136. unsigned table, uint16_t *size,
  137. uint8_t *frev, uint8_t *crev)
  138. {
  139. CGS_FUNC_ADEV;
  140. uint16_t data_start;
  141. if (amdgpu_atom_parse_data_header(
  142. adev->mode_info.atom_context, table, size,
  143. frev, crev, &data_start))
  144. return (uint8_t*)adev->mode_info.atom_context->bios +
  145. data_start;
  146. return NULL;
  147. }
  148. static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
  149. uint8_t *frev, uint8_t *crev)
  150. {
  151. CGS_FUNC_ADEV;
  152. if (amdgpu_atom_parse_cmd_header(
  153. adev->mode_info.atom_context, table,
  154. frev, crev))
  155. return 0;
  156. return -EINVAL;
  157. }
  158. static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
  159. void *args)
  160. {
  161. CGS_FUNC_ADEV;
  162. return amdgpu_atom_execute_table(
  163. adev->mode_info.atom_context, table, args);
  164. }
  165. static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
  166. enum amd_ip_block_type block_type,
  167. enum amd_clockgating_state state)
  168. {
  169. CGS_FUNC_ADEV;
  170. int i, r = -1;
  171. for (i = 0; i < adev->num_ip_blocks; i++) {
  172. if (!adev->ip_blocks[i].status.valid)
  173. continue;
  174. if (adev->ip_blocks[i].version->type == block_type) {
  175. r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
  176. (void *)adev,
  177. state);
  178. break;
  179. }
  180. }
  181. return r;
  182. }
  183. static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
  184. enum amd_ip_block_type block_type,
  185. enum amd_powergating_state state)
  186. {
  187. CGS_FUNC_ADEV;
  188. int i, r = -1;
  189. for (i = 0; i < adev->num_ip_blocks; i++) {
  190. if (!adev->ip_blocks[i].status.valid)
  191. continue;
  192. if (adev->ip_blocks[i].version->type == block_type) {
  193. r = adev->ip_blocks[i].version->funcs->set_powergating_state(
  194. (void *)adev,
  195. state);
  196. break;
  197. }
  198. }
  199. return r;
  200. }
  201. static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
  202. {
  203. CGS_FUNC_ADEV;
  204. enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
  205. switch (fw_type) {
  206. case CGS_UCODE_ID_SDMA0:
  207. result = AMDGPU_UCODE_ID_SDMA0;
  208. break;
  209. case CGS_UCODE_ID_SDMA1:
  210. result = AMDGPU_UCODE_ID_SDMA1;
  211. break;
  212. case CGS_UCODE_ID_CP_CE:
  213. result = AMDGPU_UCODE_ID_CP_CE;
  214. break;
  215. case CGS_UCODE_ID_CP_PFP:
  216. result = AMDGPU_UCODE_ID_CP_PFP;
  217. break;
  218. case CGS_UCODE_ID_CP_ME:
  219. result = AMDGPU_UCODE_ID_CP_ME;
  220. break;
  221. case CGS_UCODE_ID_CP_MEC:
  222. case CGS_UCODE_ID_CP_MEC_JT1:
  223. result = AMDGPU_UCODE_ID_CP_MEC1;
  224. break;
  225. case CGS_UCODE_ID_CP_MEC_JT2:
  226. /* for VI. JT2 should be the same as JT1, because:
  227. 1, MEC2 and MEC1 use exactly same FW.
  228. 2, JT2 is not pached but JT1 is.
  229. */
  230. if (adev->asic_type >= CHIP_TOPAZ)
  231. result = AMDGPU_UCODE_ID_CP_MEC1;
  232. else
  233. result = AMDGPU_UCODE_ID_CP_MEC2;
  234. break;
  235. case CGS_UCODE_ID_RLC_G:
  236. result = AMDGPU_UCODE_ID_RLC_G;
  237. break;
  238. case CGS_UCODE_ID_STORAGE:
  239. result = AMDGPU_UCODE_ID_STORAGE;
  240. break;
  241. default:
  242. DRM_ERROR("Firmware type not supported\n");
  243. }
  244. return result;
  245. }
  246. static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
  247. {
  248. CGS_FUNC_ADEV;
  249. if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
  250. release_firmware(adev->pm.fw);
  251. adev->pm.fw = NULL;
  252. return 0;
  253. }
  254. /* cannot release other firmware because they are not created by cgs */
  255. return -EINVAL;
  256. }
  257. static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
  258. enum cgs_ucode_id type)
  259. {
  260. CGS_FUNC_ADEV;
  261. uint16_t fw_version = 0;
  262. switch (type) {
  263. case CGS_UCODE_ID_SDMA0:
  264. fw_version = adev->sdma.instance[0].fw_version;
  265. break;
  266. case CGS_UCODE_ID_SDMA1:
  267. fw_version = adev->sdma.instance[1].fw_version;
  268. break;
  269. case CGS_UCODE_ID_CP_CE:
  270. fw_version = adev->gfx.ce_fw_version;
  271. break;
  272. case CGS_UCODE_ID_CP_PFP:
  273. fw_version = adev->gfx.pfp_fw_version;
  274. break;
  275. case CGS_UCODE_ID_CP_ME:
  276. fw_version = adev->gfx.me_fw_version;
  277. break;
  278. case CGS_UCODE_ID_CP_MEC:
  279. fw_version = adev->gfx.mec_fw_version;
  280. break;
  281. case CGS_UCODE_ID_CP_MEC_JT1:
  282. fw_version = adev->gfx.mec_fw_version;
  283. break;
  284. case CGS_UCODE_ID_CP_MEC_JT2:
  285. fw_version = adev->gfx.mec_fw_version;
  286. break;
  287. case CGS_UCODE_ID_RLC_G:
  288. fw_version = adev->gfx.rlc_fw_version;
  289. break;
  290. case CGS_UCODE_ID_STORAGE:
  291. break;
  292. default:
  293. DRM_ERROR("firmware type %d do not have version\n", type);
  294. break;
  295. }
  296. return fw_version;
  297. }
  298. static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
  299. bool en)
  300. {
  301. CGS_FUNC_ADEV;
  302. if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
  303. adev->gfx.rlc.funcs->exit_safe_mode == NULL)
  304. return 0;
  305. if (en)
  306. adev->gfx.rlc.funcs->enter_safe_mode(adev);
  307. else
  308. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  309. return 0;
  310. }
  311. static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
  312. bool lock)
  313. {
  314. CGS_FUNC_ADEV;
  315. if (lock)
  316. mutex_lock(&adev->grbm_idx_mutex);
  317. else
  318. mutex_unlock(&adev->grbm_idx_mutex);
  319. }
  320. static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
  321. enum cgs_ucode_id type,
  322. struct cgs_firmware_info *info)
  323. {
  324. CGS_FUNC_ADEV;
  325. if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
  326. uint64_t gpu_addr;
  327. uint32_t data_size;
  328. const struct gfx_firmware_header_v1_0 *header;
  329. enum AMDGPU_UCODE_ID id;
  330. struct amdgpu_firmware_info *ucode;
  331. id = fw_type_convert(cgs_device, type);
  332. ucode = &adev->firmware.ucode[id];
  333. if (ucode->fw == NULL)
  334. return -EINVAL;
  335. gpu_addr = ucode->mc_addr;
  336. header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
  337. data_size = le32_to_cpu(header->header.ucode_size_bytes);
  338. if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
  339. (type == CGS_UCODE_ID_CP_MEC_JT2)) {
  340. gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
  341. data_size = le32_to_cpu(header->jt_size) << 2;
  342. }
  343. info->kptr = ucode->kaddr;
  344. info->image_size = data_size;
  345. info->mc_addr = gpu_addr;
  346. info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
  347. if (CGS_UCODE_ID_CP_MEC == type)
  348. info->image_size = le32_to_cpu(header->jt_offset) << 2;
  349. info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
  350. info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
  351. } else {
  352. char fw_name[30] = {0};
  353. int err = 0;
  354. uint32_t ucode_size;
  355. uint32_t ucode_start_address;
  356. const uint8_t *src;
  357. const struct smc_firmware_header_v1_0 *hdr;
  358. const struct common_firmware_header *header;
  359. struct amdgpu_firmware_info *ucode = NULL;
  360. if (!adev->pm.fw) {
  361. switch (adev->asic_type) {
  362. case CHIP_TAHITI:
  363. strcpy(fw_name, "radeon/tahiti_smc.bin");
  364. break;
  365. case CHIP_PITCAIRN:
  366. if ((adev->pdev->revision == 0x81) &&
  367. ((adev->pdev->device == 0x6810) ||
  368. (adev->pdev->device == 0x6811))) {
  369. info->is_kicker = true;
  370. strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
  371. } else {
  372. strcpy(fw_name, "radeon/pitcairn_smc.bin");
  373. }
  374. break;
  375. case CHIP_VERDE:
  376. if (((adev->pdev->device == 0x6820) &&
  377. ((adev->pdev->revision == 0x81) ||
  378. (adev->pdev->revision == 0x83))) ||
  379. ((adev->pdev->device == 0x6821) &&
  380. ((adev->pdev->revision == 0x83) ||
  381. (adev->pdev->revision == 0x87))) ||
  382. ((adev->pdev->revision == 0x87) &&
  383. ((adev->pdev->device == 0x6823) ||
  384. (adev->pdev->device == 0x682b)))) {
  385. info->is_kicker = true;
  386. strcpy(fw_name, "radeon/verde_k_smc.bin");
  387. } else {
  388. strcpy(fw_name, "radeon/verde_smc.bin");
  389. }
  390. break;
  391. case CHIP_OLAND:
  392. if (((adev->pdev->revision == 0x81) &&
  393. ((adev->pdev->device == 0x6600) ||
  394. (adev->pdev->device == 0x6604) ||
  395. (adev->pdev->device == 0x6605) ||
  396. (adev->pdev->device == 0x6610))) ||
  397. ((adev->pdev->revision == 0x83) &&
  398. (adev->pdev->device == 0x6610))) {
  399. info->is_kicker = true;
  400. strcpy(fw_name, "radeon/oland_k_smc.bin");
  401. } else {
  402. strcpy(fw_name, "radeon/oland_smc.bin");
  403. }
  404. break;
  405. case CHIP_HAINAN:
  406. if (((adev->pdev->revision == 0x81) &&
  407. (adev->pdev->device == 0x6660)) ||
  408. ((adev->pdev->revision == 0x83) &&
  409. ((adev->pdev->device == 0x6660) ||
  410. (adev->pdev->device == 0x6663) ||
  411. (adev->pdev->device == 0x6665) ||
  412. (adev->pdev->device == 0x6667)))) {
  413. info->is_kicker = true;
  414. strcpy(fw_name, "radeon/hainan_k_smc.bin");
  415. } else if ((adev->pdev->revision == 0xc3) &&
  416. (adev->pdev->device == 0x6665)) {
  417. info->is_kicker = true;
  418. strcpy(fw_name, "radeon/banks_k_2_smc.bin");
  419. } else {
  420. strcpy(fw_name, "radeon/hainan_smc.bin");
  421. }
  422. break;
  423. case CHIP_BONAIRE:
  424. if ((adev->pdev->revision == 0x80) ||
  425. (adev->pdev->revision == 0x81) ||
  426. (adev->pdev->device == 0x665f)) {
  427. info->is_kicker = true;
  428. strcpy(fw_name, "radeon/bonaire_k_smc.bin");
  429. } else {
  430. strcpy(fw_name, "radeon/bonaire_smc.bin");
  431. }
  432. break;
  433. case CHIP_HAWAII:
  434. if (adev->pdev->revision == 0x80) {
  435. info->is_kicker = true;
  436. strcpy(fw_name, "radeon/hawaii_k_smc.bin");
  437. } else {
  438. strcpy(fw_name, "radeon/hawaii_smc.bin");
  439. }
  440. break;
  441. case CHIP_TOPAZ:
  442. if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
  443. ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
  444. ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
  445. info->is_kicker = true;
  446. strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
  447. } else
  448. strcpy(fw_name, "amdgpu/topaz_smc.bin");
  449. break;
  450. case CHIP_TONGA:
  451. if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
  452. ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
  453. info->is_kicker = true;
  454. strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
  455. } else
  456. strcpy(fw_name, "amdgpu/tonga_smc.bin");
  457. break;
  458. case CHIP_FIJI:
  459. strcpy(fw_name, "amdgpu/fiji_smc.bin");
  460. break;
  461. case CHIP_POLARIS11:
  462. if (type == CGS_UCODE_ID_SMU) {
  463. if (((adev->pdev->device == 0x67ef) &&
  464. ((adev->pdev->revision == 0xe0) ||
  465. (adev->pdev->revision == 0xe2) ||
  466. (adev->pdev->revision == 0xe5))) ||
  467. ((adev->pdev->device == 0x67ff) &&
  468. ((adev->pdev->revision == 0xcf) ||
  469. (adev->pdev->revision == 0xef) ||
  470. (adev->pdev->revision == 0xff)))) {
  471. info->is_kicker = true;
  472. strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
  473. } else
  474. strcpy(fw_name, "amdgpu/polaris11_smc.bin");
  475. } else if (type == CGS_UCODE_ID_SMU_SK) {
  476. strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
  477. }
  478. break;
  479. case CHIP_POLARIS10:
  480. if (type == CGS_UCODE_ID_SMU) {
  481. if ((adev->pdev->device == 0x67df) &&
  482. ((adev->pdev->revision == 0xe0) ||
  483. (adev->pdev->revision == 0xe3) ||
  484. (adev->pdev->revision == 0xe4) ||
  485. (adev->pdev->revision == 0xe5) ||
  486. (adev->pdev->revision == 0xe7) ||
  487. (adev->pdev->revision == 0xef))) {
  488. info->is_kicker = true;
  489. strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
  490. } else
  491. strcpy(fw_name, "amdgpu/polaris10_smc.bin");
  492. } else if (type == CGS_UCODE_ID_SMU_SK) {
  493. strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
  494. }
  495. break;
  496. case CHIP_POLARIS12:
  497. strcpy(fw_name, "amdgpu/polaris12_smc.bin");
  498. break;
  499. case CHIP_VEGA10:
  500. if ((adev->pdev->device == 0x687f) &&
  501. ((adev->pdev->revision == 0xc0) ||
  502. (adev->pdev->revision == 0xc1) ||
  503. (adev->pdev->revision == 0xc3)))
  504. strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
  505. else
  506. strcpy(fw_name, "amdgpu/vega10_smc.bin");
  507. break;
  508. case CHIP_VEGA12:
  509. strcpy(fw_name, "amdgpu/vega12_smc.bin");
  510. break;
  511. default:
  512. DRM_ERROR("SMC firmware not supported\n");
  513. return -EINVAL;
  514. }
  515. err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
  516. if (err) {
  517. DRM_ERROR("Failed to request firmware\n");
  518. return err;
  519. }
  520. err = amdgpu_ucode_validate(adev->pm.fw);
  521. if (err) {
  522. DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
  523. release_firmware(adev->pm.fw);
  524. adev->pm.fw = NULL;
  525. return err;
  526. }
  527. if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
  528. ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
  529. ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
  530. ucode->fw = adev->pm.fw;
  531. header = (const struct common_firmware_header *)ucode->fw->data;
  532. adev->firmware.fw_size +=
  533. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  534. }
  535. }
  536. hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
  537. amdgpu_ucode_print_smc_hdr(&hdr->header);
  538. adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
  539. ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
  540. ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
  541. src = (const uint8_t *)(adev->pm.fw->data +
  542. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  543. info->version = adev->pm.fw_version;
  544. info->image_size = ucode_size;
  545. info->ucode_start_address = ucode_start_address;
  546. info->kptr = (void *)src;
  547. }
  548. return 0;
  549. }
  550. static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
  551. {
  552. CGS_FUNC_ADEV;
  553. return amdgpu_sriov_vf(adev);
  554. }
/*
 * Report the set of active displays (mask + count) and, through
 * info->mode_info, the vblank time and refresh rate of the first
 * suitable enabled CRTC.  On the DC path the values come from the
 * cached pm display config instead of walking CRTCs.
 *
 * Returns 0 on success, -EINVAL if @info is NULL.
 */
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info)
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;

	if (!amdgpu_device_has_dc_support(adev)) {
		/* Non-DC path: walk the CRTC list directly. */
		struct amdgpu_crtc *amdgpu_crtc;
		struct drm_device *ddev = adev->ddev;
		struct drm_crtc *crtc;
		uint32_t line_time_us, vblank_lines;

		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
					info->display_count++;
				}
				if (mode_info != NULL &&
				    crtc->enabled && amdgpu_crtc->enabled &&
				    amdgpu_crtc->hw_mode.clock) {
					/* line time in us from htotal (pixels) and pixel clock (kHz) */
					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
						       amdgpu_crtc->hw_mode.clock;
					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
						       amdgpu_crtc->hw_mode.crtc_vdisplay +
						       (amdgpu_crtc->v_border * 2);
					mode_info->vblank_time_us = vblank_lines * line_time_us;
					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
					/* we have issues with mclk switching with refresh rates
					 * over 120 hz on the non-DC code.
					 */
					if (mode_info->refresh_rate > 120)
						mode_info->vblank_time_us = 0;
					/* only the first suitable CRTC provides timings */
					mode_info = NULL;
				}
			}
		}
	} else {
		/* DC path: use the display config cached by the display manager. */
		info->display_count = adev->pm.pm_display_cfg.num_display;
		if (mode_info != NULL) {
			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
		}
	}
	return 0;
}
  607. static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
  608. {
  609. CGS_FUNC_ADEV;
  610. adev->pm.dpm_enabled = enabled;
  611. return 0;
  612. }
/* Dispatch table handed to the powerplay/DPM code through cgs_device. */
static const struct cgs_ops amdgpu_cgs_ops = {
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};
  632. struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
  633. {
  634. struct amdgpu_cgs_device *cgs_device =
  635. kmalloc(sizeof(*cgs_device), GFP_KERNEL);
  636. if (!cgs_device) {
  637. DRM_ERROR("Couldn't allocate CGS device structure\n");
  638. return NULL;
  639. }
  640. cgs_device->base.ops = &amdgpu_cgs_ops;
  641. cgs_device->adev = adev;
  642. return (struct cgs_device *)cgs_device;
  643. }
/* Free a CGS device handle created by amdgpu_cgs_create_device(). */
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}