/* amdgpu_acp.c */
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: AMD
  23. *
  24. */
  25. #include <linux/irqdomain.h>
  26. #include <linux/pm_domain.h>
  27. #include <linux/platform_device.h>
  28. #include <sound/designware_i2s.h>
  29. #include <sound/pcm.h>
  30. #include "amdgpu.h"
  31. #include "atom.h"
  32. #include "amdgpu_acp.h"
  33. #include "acp_gfx_if.h"
/* Tile power-state values read back from mmACP_PGFSM_READ_REG_0 + tile */
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
/* Values/masks applied to mmACP_PGFSM_RETAIN_REG around tile power changes */
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_DSP2_MASK 0x2f
/* MMIO offsets (added to rmmio_base) for the register ranges exported as
 * MFD cell resources below
 */
#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c
/* Designware i2s component register offsets handed out via
 * struct i2s_platform_data
 */
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
/* Bluetooth i2s register range and component offsets */
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8
/* ACP register indices accessed through the CGS read/write helpers */
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
/* Bit fields within mmACP_CONTROL / mmACP_SOFT_RESET */
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
/* Poll-loop iteration budgets; each iteration delays 100us */
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4	/* number of MFD cells: DMA engine + 3 i2s controllers */
#define ACP_SRC_ID 162	/* interrupt source id for the ACP DMA IRQ */

/* Tile indices; also used as the offset added to mmACP_PGFSM_READ_REG_0 */
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};
  81. static int acp_sw_init(void *handle)
  82. {
  83. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  84. adev->acp.parent = adev->dev;
  85. adev->acp.cgs_device =
  86. amdgpu_cgs_create_device(adev);
  87. if (!adev->acp.cgs_device)
  88. return -EINVAL;
  89. return 0;
  90. }
  91. static int acp_sw_fini(void *handle)
  92. {
  93. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  94. if (adev->acp.cgs_device)
  95. amdgpu_cgs_destroy_device(adev->acp.cgs_device);
  96. return 0;
  97. }
  98. /* power off a tile/block within ACP */
  99. static int acp_suspend_tile(void *cgs_dev, int tile)
  100. {
  101. u32 val = 0;
  102. u32 count = 0;
  103. if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
  104. pr_err("Invalid ACP tile : %d to suspend\n", tile);
  105. return -1;
  106. }
  107. val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
  108. val &= ACP_TILE_ON_MASK;
  109. if (val == 0x0) {
  110. val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
  111. val = val | (1 << tile);
  112. cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
  113. cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
  114. 0x500 + tile);
  115. count = ACP_TIMEOUT_LOOP;
  116. while (true) {
  117. val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
  118. + tile);
  119. val = val & ACP_TILE_ON_MASK;
  120. if (val == ACP_TILE_OFF_MASK)
  121. break;
  122. if (--count == 0) {
  123. pr_err("Timeout reading ACP PGFSM status\n");
  124. return -ETIMEDOUT;
  125. }
  126. udelay(100);
  127. }
  128. val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
  129. val |= ACP_TILE_OFF_RETAIN_REG_MASK;
  130. cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
  131. }
  132. return 0;
  133. }
  134. /* power on a tile/block within ACP */
  135. static int acp_resume_tile(void *cgs_dev, int tile)
  136. {
  137. u32 val = 0;
  138. u32 count = 0;
  139. if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
  140. pr_err("Invalid ACP tile to resume\n");
  141. return -1;
  142. }
  143. val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
  144. val = val & ACP_TILE_ON_MASK;
  145. if (val != 0x0) {
  146. cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
  147. 0x600 + tile);
  148. count = ACP_TIMEOUT_LOOP;
  149. while (true) {
  150. val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
  151. + tile);
  152. val = val & ACP_TILE_ON_MASK;
  153. if (val == 0x0)
  154. break;
  155. if (--count == 0) {
  156. pr_err("Timeout reading ACP PGFSM status\n");
  157. return -ETIMEDOUT;
  158. }
  159. udelay(100);
  160. }
  161. val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
  162. if (tile == ACP_TILE_P1)
  163. val = val & (ACP_TILE_P1_MASK);
  164. else if (tile == ACP_TILE_P2)
  165. val = val & (ACP_TILE_P2_MASK);
  166. cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
  167. }
  168. return 0;
  169. }
/* Binds the ACP generic PM domain to the CGS device handle its power_on /
 * power_off callbacks need for register access.
 */
struct acp_pm_domain {
	void *cgs_dev;			/* CGS device handle for register I/O */
	struct generic_pm_domain gpd;	/* embedded genpd; container_of() target */
};
  174. static int acp_poweroff(struct generic_pm_domain *genpd)
  175. {
  176. int i, ret;
  177. struct acp_pm_domain *apd;
  178. apd = container_of(genpd, struct acp_pm_domain, gpd);
  179. if (apd != NULL) {
  180. /* Donot return abruptly if any of power tile fails to suspend.
  181. * Log it and continue powering off other tile
  182. */
  183. for (i = 4; i >= 0 ; i--) {
  184. ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
  185. if (ret)
  186. pr_err("ACP tile %d tile suspend failed\n", i);
  187. }
  188. }
  189. return 0;
  190. }
  191. static int acp_poweron(struct generic_pm_domain *genpd)
  192. {
  193. int i, ret;
  194. struct acp_pm_domain *apd;
  195. apd = container_of(genpd, struct acp_pm_domain, gpd);
  196. if (apd != NULL) {
  197. for (i = 0; i < 2; i++) {
  198. ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
  199. if (ret) {
  200. pr_err("ACP tile %d resume failed\n", i);
  201. break;
  202. }
  203. }
  204. /* Disable DSPs which are not going to be used */
  205. for (i = 0; i < 3; i++) {
  206. ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
  207. /* Continue suspending other DSP, even if one fails */
  208. if (ret)
  209. pr_err("ACP DSP %d suspend failed\n", i);
  210. }
  211. }
  212. return 0;
  213. }
  214. static struct device *get_mfd_cell_dev(const char *device_name, int r)
  215. {
  216. char auto_dev_name[25];
  217. struct device *dev;
  218. snprintf(auto_dev_name, sizeof(auto_dev_name),
  219. "%s.%d.auto", device_name, r);
  220. dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
  221. dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
  222. return dev;
  223. }
  224. /**
  225. * acp_hw_init - start and test ACP block
  226. *
  227. * @adev: amdgpu_device pointer
  228. *
  229. */
  230. static int acp_hw_init(void *handle)
  231. {
  232. int r, i;
  233. uint64_t acp_base;
  234. u32 val = 0;
  235. u32 count = 0;
  236. struct device *dev;
  237. struct i2s_platform_data *i2s_pdata;
  238. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  239. const struct amdgpu_ip_block *ip_block =
  240. amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
  241. if (!ip_block)
  242. return -EINVAL;
  243. r = amd_acp_hw_init(adev->acp.cgs_device,
  244. ip_block->version->major, ip_block->version->minor);
  245. /* -ENODEV means board uses AZ rather than ACP */
  246. if (r == -ENODEV) {
  247. amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
  248. return 0;
  249. } else if (r) {
  250. return r;
  251. }
  252. if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
  253. return -EINVAL;
  254. acp_base = adev->rmmio_base;
  255. if (adev->asic_type != CHIP_STONEY) {
  256. adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
  257. if (adev->acp.acp_genpd == NULL)
  258. return -ENOMEM;
  259. adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
  260. adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
  261. adev->acp.acp_genpd->gpd.power_on = acp_poweron;
  262. adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
  263. pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
  264. }
  265. adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
  266. GFP_KERNEL);
  267. if (adev->acp.acp_cell == NULL)
  268. return -ENOMEM;
  269. adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
  270. if (adev->acp.acp_res == NULL) {
  271. kfree(adev->acp.acp_cell);
  272. return -ENOMEM;
  273. }
  274. i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
  275. if (i2s_pdata == NULL) {
  276. kfree(adev->acp.acp_res);
  277. kfree(adev->acp.acp_cell);
  278. return -ENOMEM;
  279. }
  280. switch (adev->asic_type) {
  281. case CHIP_STONEY:
  282. i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
  283. DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
  284. break;
  285. default:
  286. i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
  287. }
  288. i2s_pdata[0].cap = DWC_I2S_PLAY;
  289. i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
  290. i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
  291. i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
  292. switch (adev->asic_type) {
  293. case CHIP_STONEY:
  294. i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
  295. DW_I2S_QUIRK_COMP_PARAM1 |
  296. DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
  297. break;
  298. default:
  299. i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
  300. DW_I2S_QUIRK_COMP_PARAM1;
  301. }
  302. i2s_pdata[1].cap = DWC_I2S_RECORD;
  303. i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
  304. i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
  305. i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
  306. i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
  307. switch (adev->asic_type) {
  308. case CHIP_STONEY:
  309. i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
  310. break;
  311. default:
  312. break;
  313. }
  314. i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
  315. i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
  316. i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
  317. i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
  318. adev->acp.acp_res[0].name = "acp2x_dma";
  319. adev->acp.acp_res[0].flags = IORESOURCE_MEM;
  320. adev->acp.acp_res[0].start = acp_base;
  321. adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
  322. adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
  323. adev->acp.acp_res[1].flags = IORESOURCE_MEM;
  324. adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
  325. adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
  326. adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
  327. adev->acp.acp_res[2].flags = IORESOURCE_MEM;
  328. adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
  329. adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
  330. adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
  331. adev->acp.acp_res[3].flags = IORESOURCE_MEM;
  332. adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
  333. adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
  334. adev->acp.acp_res[4].name = "acp2x_dma_irq";
  335. adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
  336. adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
  337. adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
  338. adev->acp.acp_cell[0].name = "acp_audio_dma";
  339. adev->acp.acp_cell[0].num_resources = 5;
  340. adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
  341. adev->acp.acp_cell[0].platform_data = &adev->asic_type;
  342. adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
  343. adev->acp.acp_cell[1].name = "designware-i2s";
  344. adev->acp.acp_cell[1].num_resources = 1;
  345. adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
  346. adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
  347. adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
  348. adev->acp.acp_cell[2].name = "designware-i2s";
  349. adev->acp.acp_cell[2].num_resources = 1;
  350. adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
  351. adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
  352. adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
  353. adev->acp.acp_cell[3].name = "designware-i2s";
  354. adev->acp.acp_cell[3].num_resources = 1;
  355. adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
  356. adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
  357. adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
  358. r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
  359. ACP_DEVS);
  360. if (r)
  361. return r;
  362. if (adev->asic_type != CHIP_STONEY) {
  363. for (i = 0; i < ACP_DEVS ; i++) {
  364. dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
  365. r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
  366. if (r) {
  367. dev_err(dev, "Failed to add dev to genpd\n");
  368. return r;
  369. }
  370. }
  371. }
  372. /* Assert Soft reset of ACP */
  373. val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
  374. val |= ACP_SOFT_RESET__SoftResetAud_MASK;
  375. cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
  376. count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
  377. while (true) {
  378. val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
  379. if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
  380. (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
  381. break;
  382. if (--count == 0) {
  383. dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
  384. return -ETIMEDOUT;
  385. }
  386. udelay(100);
  387. }
  388. /* Enable clock to ACP and wait until the clock is enabled */
  389. val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
  390. val = val | ACP_CONTROL__ClkEn_MASK;
  391. cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
  392. count = ACP_CLOCK_EN_TIME_OUT_VALUE;
  393. while (true) {
  394. val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
  395. if (val & (u32) 0x1)
  396. break;
  397. if (--count == 0) {
  398. dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
  399. return -ETIMEDOUT;
  400. }
  401. udelay(100);
  402. }
  403. /* Deassert the SOFT RESET flags */
  404. val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
  405. val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
  406. cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
  407. return 0;
  408. }
  409. /**
  410. * acp_hw_fini - stop the hardware block
  411. *
  412. * @adev: amdgpu_device pointer
  413. *
  414. */
  415. static int acp_hw_fini(void *handle)
  416. {
  417. int i, ret;
  418. u32 val = 0;
  419. u32 count = 0;
  420. struct device *dev;
  421. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  422. /* return early if no ACP */
  423. if (!adev->acp.acp_cell) {
  424. amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
  425. return 0;
  426. }
  427. /* Assert Soft reset of ACP */
  428. val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
  429. val |= ACP_SOFT_RESET__SoftResetAud_MASK;
  430. cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
  431. count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
  432. while (true) {
  433. val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
  434. if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
  435. (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
  436. break;
  437. if (--count == 0) {
  438. dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
  439. return -ETIMEDOUT;
  440. }
  441. udelay(100);
  442. }
  443. /* Disable ACP clock */
  444. val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
  445. val &= ~ACP_CONTROL__ClkEn_MASK;
  446. cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
  447. count = ACP_CLOCK_EN_TIME_OUT_VALUE;
  448. while (true) {
  449. val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
  450. if (val & (u32) 0x1)
  451. break;
  452. if (--count == 0) {
  453. dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
  454. return -ETIMEDOUT;
  455. }
  456. udelay(100);
  457. }
  458. if (adev->acp.acp_genpd) {
  459. for (i = 0; i < ACP_DEVS ; i++) {
  460. dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
  461. ret = pm_genpd_remove_device(dev);
  462. /* If removal fails, dont giveup and try rest */
  463. if (ret)
  464. dev_err(dev, "remove dev from genpd failed\n");
  465. }
  466. kfree(adev->acp.acp_genpd);
  467. }
  468. mfd_remove_devices(adev->acp.parent);
  469. kfree(adev->acp.acp_res);
  470. kfree(adev->acp.acp_cell);
  471. return 0;
  472. }
  473. static int acp_suspend(void *handle)
  474. {
  475. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  476. /* power up on suspend */
  477. if (!adev->acp.acp_cell)
  478. amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
  479. return 0;
  480. }
  481. static int acp_resume(void *handle)
  482. {
  483. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  484. /* power down again on resume */
  485. if (!adev->acp.acp_cell)
  486. amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
  487. return 0;
  488. }
  489. static int acp_early_init(void *handle)
  490. {
  491. return 0;
  492. }
  493. static bool acp_is_idle(void *handle)
  494. {
  495. return true;
  496. }
  497. static int acp_wait_for_idle(void *handle)
  498. {
  499. return 0;
  500. }
  501. static int acp_soft_reset(void *handle)
  502. {
  503. return 0;
  504. }
  505. static int acp_set_clockgating_state(void *handle,
  506. enum amd_clockgating_state state)
  507. {
  508. return 0;
  509. }
  510. static int acp_set_powergating_state(void *handle,
  511. enum amd_powergating_state state)
  512. {
  513. return 0;
  514. }
/* amd_ip_funcs callbacks wiring the ACP block into the amdgpu IP lifecycle. */
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,	/* no late-init stage needed */
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};
/* Version descriptor exported to amdgpu: ACP IP v2.2 using the table above. */
const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};