amdgpu_cgs.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. *
  23. */
  24. #include <linux/list.h>
  25. #include <linux/slab.h>
  26. #include <linux/pci.h>
  27. #include <drm/drmP.h>
  28. #include <linux/firmware.h>
  29. #include <drm/amdgpu_drm.h>
  30. #include "amdgpu.h"
  31. #include "cgs_linux.h"
  32. #include "atom.h"
  33. #include "amdgpu_ucode.h"
/*
 * Per-device CGS wrapper: binds the common CGS interface to the backing
 * amdgpu device.
 */
struct amdgpu_cgs_device {
	struct cgs_device base;		/* must stay first: CGS_FUNC_ADEV casts cgs_device* to this */
	struct amdgpu_device *adev;	/* backing amdgpu device */
};
/*
 * Declare and initialize a local 'adev' from the enclosing function's
 * 'cgs_device' parameter.  Valid because 'base' is the first member of
 * struct amdgpu_cgs_device, so the pointer cast is safe.
 */
#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
  41. static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
  42. {
  43. CGS_FUNC_ADEV;
  44. return RREG32(offset);
  45. }
  46. static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
  47. uint32_t value)
  48. {
  49. CGS_FUNC_ADEV;
  50. WREG32(offset, value);
  51. }
  52. static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
  53. enum cgs_ind_reg space,
  54. unsigned index)
  55. {
  56. CGS_FUNC_ADEV;
  57. switch (space) {
  58. case CGS_IND_REG__MMIO:
  59. return RREG32_IDX(index);
  60. case CGS_IND_REG__PCIE:
  61. return RREG32_PCIE(index);
  62. case CGS_IND_REG__SMC:
  63. return RREG32_SMC(index);
  64. case CGS_IND_REG__UVD_CTX:
  65. return RREG32_UVD_CTX(index);
  66. case CGS_IND_REG__DIDT:
  67. return RREG32_DIDT(index);
  68. case CGS_IND_REG_GC_CAC:
  69. return RREG32_GC_CAC(index);
  70. case CGS_IND_REG_SE_CAC:
  71. return RREG32_SE_CAC(index);
  72. case CGS_IND_REG__AUDIO_ENDPT:
  73. DRM_ERROR("audio endpt register access not implemented.\n");
  74. return 0;
  75. }
  76. WARN(1, "Invalid indirect register space");
  77. return 0;
  78. }
  79. static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
  80. enum cgs_ind_reg space,
  81. unsigned index, uint32_t value)
  82. {
  83. CGS_FUNC_ADEV;
  84. switch (space) {
  85. case CGS_IND_REG__MMIO:
  86. return WREG32_IDX(index, value);
  87. case CGS_IND_REG__PCIE:
  88. return WREG32_PCIE(index, value);
  89. case CGS_IND_REG__SMC:
  90. return WREG32_SMC(index, value);
  91. case CGS_IND_REG__UVD_CTX:
  92. return WREG32_UVD_CTX(index, value);
  93. case CGS_IND_REG__DIDT:
  94. return WREG32_DIDT(index, value);
  95. case CGS_IND_REG_GC_CAC:
  96. return WREG32_GC_CAC(index, value);
  97. case CGS_IND_REG_SE_CAC:
  98. return WREG32_SE_CAC(index, value);
  99. case CGS_IND_REG__AUDIO_ENDPT:
  100. DRM_ERROR("audio endpt register access not implemented.\n");
  101. return;
  102. }
  103. WARN(1, "Invalid indirect register space");
  104. }
  105. static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
  106. enum cgs_resource_type resource_type,
  107. uint64_t size,
  108. uint64_t offset,
  109. uint64_t *resource_base)
  110. {
  111. CGS_FUNC_ADEV;
  112. if (resource_base == NULL)
  113. return -EINVAL;
  114. switch (resource_type) {
  115. case CGS_RESOURCE_TYPE_MMIO:
  116. if (adev->rmmio_size == 0)
  117. return -ENOENT;
  118. if ((offset + size) > adev->rmmio_size)
  119. return -EINVAL;
  120. *resource_base = adev->rmmio_base;
  121. return 0;
  122. case CGS_RESOURCE_TYPE_DOORBELL:
  123. if (adev->doorbell.size == 0)
  124. return -ENOENT;
  125. if ((offset + size) > adev->doorbell.size)
  126. return -EINVAL;
  127. *resource_base = adev->doorbell.base;
  128. return 0;
  129. case CGS_RESOURCE_TYPE_FB:
  130. case CGS_RESOURCE_TYPE_IO:
  131. case CGS_RESOURCE_TYPE_ROM:
  132. default:
  133. return -EINVAL;
  134. }
  135. }
  136. static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
  137. unsigned table, uint16_t *size,
  138. uint8_t *frev, uint8_t *crev)
  139. {
  140. CGS_FUNC_ADEV;
  141. uint16_t data_start;
  142. if (amdgpu_atom_parse_data_header(
  143. adev->mode_info.atom_context, table, size,
  144. frev, crev, &data_start))
  145. return (uint8_t*)adev->mode_info.atom_context->bios +
  146. data_start;
  147. return NULL;
  148. }
  149. static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
  150. uint8_t *frev, uint8_t *crev)
  151. {
  152. CGS_FUNC_ADEV;
  153. if (amdgpu_atom_parse_cmd_header(
  154. adev->mode_info.atom_context, table,
  155. frev, crev))
  156. return 0;
  157. return -EINVAL;
  158. }
  159. static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
  160. void *args)
  161. {
  162. CGS_FUNC_ADEV;
  163. return amdgpu_atom_execute_table(
  164. adev->mode_info.atom_context, table, args);
  165. }
/*
 * Private data attached to a CGS-registered amdgpu_irq_src; forwards
 * enable/disable and IRQ delivery to the CGS client's callbacks.
 */
struct cgs_irq_params {
	unsigned src_id;			/* HW source id this registration covers */
	cgs_irq_source_set_func_t set;		/* client callback to (un)mask the source */
	cgs_irq_handler_func_t handler;		/* client callback invoked on each IRQ */
	void *private_data;			/* opaque client cookie passed to both callbacks */
};
  172. static int cgs_set_irq_state(struct amdgpu_device *adev,
  173. struct amdgpu_irq_src *src,
  174. unsigned type,
  175. enum amdgpu_interrupt_state state)
  176. {
  177. struct cgs_irq_params *irq_params =
  178. (struct cgs_irq_params *)src->data;
  179. if (!irq_params)
  180. return -EINVAL;
  181. if (!irq_params->set)
  182. return -EINVAL;
  183. return irq_params->set(irq_params->private_data,
  184. irq_params->src_id,
  185. type,
  186. (int)state);
  187. }
  188. static int cgs_process_irq(struct amdgpu_device *adev,
  189. struct amdgpu_irq_src *source,
  190. struct amdgpu_iv_entry *entry)
  191. {
  192. struct cgs_irq_params *irq_params =
  193. (struct cgs_irq_params *)source->data;
  194. if (!irq_params)
  195. return -EINVAL;
  196. if (!irq_params->handler)
  197. return -EINVAL;
  198. return irq_params->handler(irq_params->private_data,
  199. irq_params->src_id,
  200. entry->iv_entry);
  201. }
/* IRQ source ops used for every CGS-registered interrupt source. */
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
  206. static int amdgpu_cgs_add_irq_source(void *cgs_device,
  207. unsigned client_id,
  208. unsigned src_id,
  209. unsigned num_types,
  210. cgs_irq_source_set_func_t set,
  211. cgs_irq_handler_func_t handler,
  212. void *private_data)
  213. {
  214. CGS_FUNC_ADEV;
  215. int ret = 0;
  216. struct cgs_irq_params *irq_params;
  217. struct amdgpu_irq_src *source =
  218. kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
  219. if (!source)
  220. return -ENOMEM;
  221. irq_params =
  222. kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
  223. if (!irq_params) {
  224. kfree(source);
  225. return -ENOMEM;
  226. }
  227. source->num_types = num_types;
  228. source->funcs = &cgs_irq_funcs;
  229. irq_params->src_id = src_id;
  230. irq_params->set = set;
  231. irq_params->handler = handler;
  232. irq_params->private_data = private_data;
  233. source->data = (void *)irq_params;
  234. ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
  235. if (ret) {
  236. kfree(irq_params);
  237. kfree(source);
  238. }
  239. return ret;
  240. }
  241. static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
  242. unsigned src_id, unsigned type)
  243. {
  244. CGS_FUNC_ADEV;
  245. if (!adev->irq.client[client_id].sources)
  246. return -EINVAL;
  247. return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
  248. }
  249. static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
  250. unsigned src_id, unsigned type)
  251. {
  252. CGS_FUNC_ADEV;
  253. if (!adev->irq.client[client_id].sources)
  254. return -EINVAL;
  255. return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
  256. }
  257. static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
  258. enum amd_ip_block_type block_type,
  259. enum amd_clockgating_state state)
  260. {
  261. CGS_FUNC_ADEV;
  262. int i, r = -1;
  263. for (i = 0; i < adev->num_ip_blocks; i++) {
  264. if (!adev->ip_blocks[i].status.valid)
  265. continue;
  266. if (adev->ip_blocks[i].version->type == block_type) {
  267. r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
  268. (void *)adev,
  269. state);
  270. break;
  271. }
  272. }
  273. return r;
  274. }
  275. static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
  276. enum amd_ip_block_type block_type,
  277. enum amd_powergating_state state)
  278. {
  279. CGS_FUNC_ADEV;
  280. int i, r = -1;
  281. for (i = 0; i < adev->num_ip_blocks; i++) {
  282. if (!adev->ip_blocks[i].status.valid)
  283. continue;
  284. if (adev->ip_blocks[i].version->type == block_type) {
  285. r = adev->ip_blocks[i].version->funcs->set_powergating_state(
  286. (void *)adev,
  287. state);
  288. break;
  289. }
  290. }
  291. return r;
  292. }
  293. static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
  294. {
  295. CGS_FUNC_ADEV;
  296. enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
  297. switch (fw_type) {
  298. case CGS_UCODE_ID_SDMA0:
  299. result = AMDGPU_UCODE_ID_SDMA0;
  300. break;
  301. case CGS_UCODE_ID_SDMA1:
  302. result = AMDGPU_UCODE_ID_SDMA1;
  303. break;
  304. case CGS_UCODE_ID_CP_CE:
  305. result = AMDGPU_UCODE_ID_CP_CE;
  306. break;
  307. case CGS_UCODE_ID_CP_PFP:
  308. result = AMDGPU_UCODE_ID_CP_PFP;
  309. break;
  310. case CGS_UCODE_ID_CP_ME:
  311. result = AMDGPU_UCODE_ID_CP_ME;
  312. break;
  313. case CGS_UCODE_ID_CP_MEC:
  314. case CGS_UCODE_ID_CP_MEC_JT1:
  315. result = AMDGPU_UCODE_ID_CP_MEC1;
  316. break;
  317. case CGS_UCODE_ID_CP_MEC_JT2:
  318. /* for VI. JT2 should be the same as JT1, because:
  319. 1, MEC2 and MEC1 use exactly same FW.
  320. 2, JT2 is not pached but JT1 is.
  321. */
  322. if (adev->asic_type >= CHIP_TOPAZ)
  323. result = AMDGPU_UCODE_ID_CP_MEC1;
  324. else
  325. result = AMDGPU_UCODE_ID_CP_MEC2;
  326. break;
  327. case CGS_UCODE_ID_RLC_G:
  328. result = AMDGPU_UCODE_ID_RLC_G;
  329. break;
  330. case CGS_UCODE_ID_STORAGE:
  331. result = AMDGPU_UCODE_ID_STORAGE;
  332. break;
  333. default:
  334. DRM_ERROR("Firmware type not supported\n");
  335. }
  336. return result;
  337. }
  338. static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
  339. {
  340. CGS_FUNC_ADEV;
  341. if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
  342. release_firmware(adev->pm.fw);
  343. adev->pm.fw = NULL;
  344. return 0;
  345. }
  346. /* cannot release other firmware because they are not created by cgs */
  347. return -EINVAL;
  348. }
  349. static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
  350. enum cgs_ucode_id type)
  351. {
  352. CGS_FUNC_ADEV;
  353. uint16_t fw_version = 0;
  354. switch (type) {
  355. case CGS_UCODE_ID_SDMA0:
  356. fw_version = adev->sdma.instance[0].fw_version;
  357. break;
  358. case CGS_UCODE_ID_SDMA1:
  359. fw_version = adev->sdma.instance[1].fw_version;
  360. break;
  361. case CGS_UCODE_ID_CP_CE:
  362. fw_version = adev->gfx.ce_fw_version;
  363. break;
  364. case CGS_UCODE_ID_CP_PFP:
  365. fw_version = adev->gfx.pfp_fw_version;
  366. break;
  367. case CGS_UCODE_ID_CP_ME:
  368. fw_version = adev->gfx.me_fw_version;
  369. break;
  370. case CGS_UCODE_ID_CP_MEC:
  371. fw_version = adev->gfx.mec_fw_version;
  372. break;
  373. case CGS_UCODE_ID_CP_MEC_JT1:
  374. fw_version = adev->gfx.mec_fw_version;
  375. break;
  376. case CGS_UCODE_ID_CP_MEC_JT2:
  377. fw_version = adev->gfx.mec_fw_version;
  378. break;
  379. case CGS_UCODE_ID_RLC_G:
  380. fw_version = adev->gfx.rlc_fw_version;
  381. break;
  382. case CGS_UCODE_ID_STORAGE:
  383. break;
  384. default:
  385. DRM_ERROR("firmware type %d do not have version\n", type);
  386. break;
  387. }
  388. return fw_version;
  389. }
  390. static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
  391. bool en)
  392. {
  393. CGS_FUNC_ADEV;
  394. if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
  395. adev->gfx.rlc.funcs->exit_safe_mode == NULL)
  396. return 0;
  397. if (en)
  398. adev->gfx.rlc.funcs->enter_safe_mode(adev);
  399. else
  400. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  401. return 0;
  402. }
  403. static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
  404. bool lock)
  405. {
  406. CGS_FUNC_ADEV;
  407. if (lock)
  408. mutex_lock(&adev->grbm_idx_mutex);
  409. else
  410. mutex_unlock(&adev->grbm_idx_mutex);
  411. }
/*
 * Fill @info with kernel pointer, GPU address, sizes and versions for the
 * firmware of @type.
 *
 * Non-SMU types must already have been loaded through the normal amdgpu
 * ucode path; the data is read out of adev->firmware.ucode[].  For SMU
 * types this function additionally requests and validates the SMC image
 * on first use, selecting the file name from the ASIC type and, for
 * "kicker" board revisions, a patched variant.
 *
 * Returns 0 on success, -EINVAL for unloaded/unsupported firmware, or the
 * request_firmware()/amdgpu_ucode_validate() error.
 */
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		/* non-SMU firmware: already loaded elsewhere, just report it */
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		/* the jump tables live in a page-aligned region after the
		 * main ucode; jt_size is in dwords, hence the << 2
		 */
		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		/* for MEC itself, only the part before the jump table counts */
		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		/* SMU firmware: request + validate on first use, then report */
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			/* pick the SMC image by ASIC; specific device/revision
			 * combinations ("kicker" boards) need a patched image
			 */
			switch (adev->asic_type) {
			case CHIP_TAHITI:
				strcpy(fw_name, "radeon/tahiti_smc.bin");
				break;
			case CHIP_PITCAIRN:
				if ((adev->pdev->revision == 0x81) &&
				    ((adev->pdev->device == 0x6810) ||
				     (adev->pdev->device == 0x6811))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/pitcairn_smc.bin");
				}
				break;
			case CHIP_VERDE:
				if (((adev->pdev->device == 0x6820) &&
				     ((adev->pdev->revision == 0x81) ||
				      (adev->pdev->revision == 0x83))) ||
				    ((adev->pdev->device == 0x6821) &&
				     ((adev->pdev->revision == 0x83) ||
				      (adev->pdev->revision == 0x87))) ||
				    ((adev->pdev->revision == 0x87) &&
				     ((adev->pdev->device == 0x6823) ||
				      (adev->pdev->device == 0x682b)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/verde_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/verde_smc.bin");
				}
				break;
			case CHIP_OLAND:
				if (((adev->pdev->revision == 0x81) &&
				     ((adev->pdev->device == 0x6600) ||
				      (adev->pdev->device == 0x6604) ||
				      (adev->pdev->device == 0x6605) ||
				      (adev->pdev->device == 0x6610))) ||
				    ((adev->pdev->revision == 0x83) &&
				     (adev->pdev->device == 0x6610))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/oland_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/oland_smc.bin");
				}
				break;
			case CHIP_HAINAN:
				if (((adev->pdev->revision == 0x81) &&
				     (adev->pdev->device == 0x6660)) ||
				    ((adev->pdev->revision == 0x83) &&
				     ((adev->pdev->device == 0x6660) ||
				      (adev->pdev->device == 0x6663) ||
				      (adev->pdev->device == 0x6665) ||
				      (adev->pdev->device == 0x6667)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hainan_k_smc.bin");
				} else if ((adev->pdev->revision == 0xc3) &&
					   (adev->pdev->device == 0x6665)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hainan_smc.bin");
				}
				break;
			case CHIP_BONAIRE:
				if ((adev->pdev->revision == 0x80) ||
				    (adev->pdev->revision == 0x81) ||
				    (adev->pdev->device == 0x665f)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/bonaire_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/bonaire_smc.bin");
				}
				break;
			case CHIP_HAWAII:
				if (adev->pdev->revision == 0x80) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hawaii_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hawaii_smc.bin");
				}
				break;
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
				    ((adev->pdev->revision == 0xc0) ||
				     (adev->pdev->revision == 0xc1) ||
				     (adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			/* with PSP loading, account the SMC image in the
			 * total firmware size so its BO slot gets reserved
			 */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		/* report the (possibly pre-existing) SMC image */
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}
  639. static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
  640. {
  641. CGS_FUNC_ADEV;
  642. return amdgpu_sriov_vf(adev);
  643. }
/*
 * Gather active-display information for the power code.
 *
 * Fills info->active_display_mask and info->display_count and, when the
 * caller supplied info->mode_info, timing data (vblank time, refresh
 * rate, reference clock).  On the non-DC path the CRTC list is walked
 * directly; with DC the pre-computed pm_display_cfg values are used.
 */
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info) {
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;
		/* always set the reference clock */
		mode_info->ref_clock = adev->clock.spll.reference_freq;
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		struct amdgpu_crtc *amdgpu_crtc;
		struct drm_device *ddev = adev->ddev;
		struct drm_crtc *crtc;
		uint32_t line_time_us, vblank_lines;

		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
					info->display_count++;
				}
				/* timing info is taken from the first enabled
				 * CRTC with a valid mode only; mode_info is
				 * NULLed afterwards so later CRTCs are skipped
				 */
				if (mode_info != NULL &&
					crtc->enabled && amdgpu_crtc->enabled &&
					amdgpu_crtc->hw_mode.clock) {
					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
								amdgpu_crtc->hw_mode.clock;
					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
								amdgpu_crtc->hw_mode.crtc_vdisplay +
								(amdgpu_crtc->v_border * 2);
					mode_info->vblank_time_us = vblank_lines * line_time_us;
					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
					/* we have issues with mclk switching with refresh rates
					 * over 120 hz on the non-DC code.
					 */
					if (mode_info->refresh_rate > 120)
						mode_info->vblank_time_us = 0;
					mode_info = NULL;
				}
			}
		}
	} else {
		/* DC path: use the display configuration cached by DC */
		info->display_count = adev->pm.pm_display_cfg.num_display;
		if (mode_info != NULL) {
			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
		}
	}
	return 0;
}
  699. static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
  700. {
  701. CGS_FUNC_ADEV;
  702. adev->pm.dpm_enabled = enabled;
  703. return 0;
  704. }
/* Device-level CGS operations exposed to the powerplay/SMU code. */
static const struct cgs_ops amdgpu_cgs_ops = {
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};
/* OS-level CGS operations (interrupt management). */
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};
  729. struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
  730. {
  731. struct amdgpu_cgs_device *cgs_device =
  732. kmalloc(sizeof(*cgs_device), GFP_KERNEL);
  733. if (!cgs_device) {
  734. DRM_ERROR("Couldn't allocate CGS device structure\n");
  735. return NULL;
  736. }
  737. cgs_device->base.ops = &amdgpu_cgs_ops;
  738. cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
  739. cgs_device->adev = adev;
  740. return (struct cgs_device *)cgs_device;
  741. }
  742. void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
  743. {
  744. kfree(cgs_device);
  745. }