vega10_ih.c — AMDGPU Vega10 interrupt handler (IH) block, OSSSYS v4.0
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <drm/drmP.h>
  24. #include "amdgpu.h"
  25. #include "amdgpu_ih.h"
  26. #include "soc15.h"
  27. #include "vega10/soc15ip.h"
  28. #include "vega10/OSSSYS/osssys_4_0_offset.h"
  29. #include "vega10/OSSSYS/osssys_4_0_sh_mask.h"
  30. #include "soc15_common.h"
  31. #include "vega10_ih.h"
  32. static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
  33. /**
  34. * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
  35. *
  36. * @adev: amdgpu_device pointer
  37. *
  38. * Enable the interrupt ring buffer (VEGA10).
  39. */
  40. static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
  41. {
  42. u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  43. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
  44. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
  45. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  46. adev->irq.ih.enabled = true;
  47. }
  48. /**
  49. * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
  50. *
  51. * @adev: amdgpu_device pointer
  52. *
  53. * Disable the interrupt ring buffer (VEGA10).
  54. */
  55. static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
  56. {
  57. u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  58. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
  59. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
  60. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  61. /* set rptr, wptr to 0 */
  62. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
  63. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
  64. adev->irq.ih.enabled = false;
  65. adev->irq.ih.rptr = 0;
  66. }
  67. /**
  68. * vega10_ih_irq_init - init and enable the interrupt ring
  69. *
  70. * @adev: amdgpu_device pointer
  71. *
  72. * Allocate a ring buffer for the interrupt controller,
  73. * enable the RLC, disable interrupts, enable the IH
  74. * ring buffer and enable it (VI).
  75. * Called at device load and reume.
  76. * Returns 0 for success, errors for failure.
  77. */
  78. static int vega10_ih_irq_init(struct amdgpu_device *adev)
  79. {
  80. int ret = 0;
  81. int rb_bufsz;
  82. u32 ih_rb_cntl, ih_doorbell_rtpr;
  83. u32 tmp;
  84. u64 wptr_off;
  85. /* disable irqs */
  86. vega10_ih_disable_interrupts(adev);
  87. if (adev->flags & AMD_IS_APU)
  88. nbio_v7_0_ih_control(adev);
  89. else
  90. nbio_v6_1_ih_control(adev);
  91. ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  92. /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
  93. if (adev->irq.ih.use_bus_addr) {
  94. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.rb_dma_addr >> 8);
  95. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
  96. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
  97. } else {
  98. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.gpu_addr >> 8);
  99. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), (adev->irq.ih.gpu_addr >> 40) & 0xff);
  100. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
  101. }
  102. rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
  103. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
  104. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
  105. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
  106. /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
  107. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
  108. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
  109. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
  110. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
  111. if (adev->irq.msi_enabled)
  112. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
  113. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  114. /* set the writeback address whether it's enabled or not */
  115. if (adev->irq.ih.use_bus_addr)
  116. wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
  117. else
  118. wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
  119. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO), lower_32_bits(wptr_off));
  120. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI), upper_32_bits(wptr_off) & 0xFF);
  121. /* set rptr, wptr to 0 */
  122. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
  123. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
  124. ih_doorbell_rtpr = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR));
  125. if (adev->irq.ih.use_doorbell) {
  126. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  127. OFFSET, adev->irq.ih.doorbell_index);
  128. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  129. ENABLE, 1);
  130. } else {
  131. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  132. ENABLE, 0);
  133. }
  134. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
  135. if (adev->flags & AMD_IS_APU)
  136. nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
  137. else
  138. nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
  139. tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
  140. tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
  141. CLIENT18_IS_STORM_CLIENT, 1);
  142. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL), tmp);
  143. tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL));
  144. tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
  145. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL), tmp);
  146. pci_set_master(adev->pdev);
  147. /* enable interrupts */
  148. vega10_ih_enable_interrupts(adev);
  149. return ret;
  150. }
  151. /**
  152. * vega10_ih_irq_disable - disable interrupts
  153. *
  154. * @adev: amdgpu_device pointer
  155. *
  156. * Disable interrupts on the hw (VEGA10).
  157. */
  158. static void vega10_ih_irq_disable(struct amdgpu_device *adev)
  159. {
  160. vega10_ih_disable_interrupts(adev);
  161. /* Wait and acknowledge irq */
  162. mdelay(1);
  163. }
  164. /**
  165. * vega10_ih_get_wptr - get the IH ring buffer wptr
  166. *
  167. * @adev: amdgpu_device pointer
  168. *
  169. * Get the IH ring buffer wptr from either the register
  170. * or the writeback memory buffer (VEGA10). Also check for
  171. * ring buffer overflow and deal with it.
  172. * Returns the value of the wptr.
  173. */
  174. static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
  175. {
  176. u32 wptr, tmp;
  177. if (adev->irq.ih.use_bus_addr)
  178. wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
  179. else
  180. wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
  181. if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
  182. wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
  183. /* When a ring buffer overflow happen start parsing interrupt
  184. * from the last not overwritten vector (wptr + 32). Hopefully
  185. * this should allow us to catchup.
  186. */
  187. tmp = (wptr + 32) & adev->irq.ih.ptr_mask;
  188. dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
  189. wptr, adev->irq.ih.rptr, tmp);
  190. adev->irq.ih.rptr = tmp;
  191. tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  192. tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
  193. WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
  194. }
  195. return (wptr & adev->irq.ih.ptr_mask);
  196. }
/**
 * vega10_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Filters retry page faults from the VMC/UTCL2 clients so that only
 * the first fault per (pasid, address) pair is processed; duplicates
 * are dropped by advancing rptr past the 32-byte vector.
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
{
	/* rptr is in bytes; the ring is indexed in 32-bit dwords */
	u32 ring_index = adev->irq.ih.rptr >> 2;
	u32 dw0, dw3, dw4, dw5;
	u16 pasid;
	u64 addr, key;
	struct amdgpu_vm *vm;
	int r;

	/* Peek at the vector without consuming it; decode_iv will read
	 * the same slot if we return true.
	 */
	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);

	/* Filter retry page faults, let only the first one pass. If
	 * there are too many outstanding faults, ignore them until
	 * some faults get cleared.
	 */
	switch (dw0 & 0xff) {	/* low byte of dw0 is the client id */
	case AMDGPU_IH_CLIENTID_VMC:
	case AMDGPU_IH_CLIENTID_UTCL2:
		break;
	default:
		/* Not a VM fault */
		return true;
	}

	/* Not a retry fault */
	if (!(dw5 & 0x80))
		return true;

	pasid = dw3 & 0xffff;
	/* No PASID, can't identify faulting process */
	if (!pasid)
		return true;

	/* NOTE(review): fault address assembled from dw5[3:0] (bits 47:44)
	 * plus dw4 as a 4K page number — assumes the Vega10 IV format;
	 * confirm against the IH spec.
	 */
	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
	key = AMDGPU_VM_FAULT(pasid, addr);
	r = amdgpu_ih_add_fault(adev, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0)
		goto ignore_iv;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (WARN_ON_ONCE(!vm)) {
		/* VM not found, process it normally */
		amdgpu_ih_clear_fault(adev, key);
		return true;
	}

	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		amdgpu_ih_clear_fault(adev, key);
		goto ignore_iv;
	}

	/* It's the first fault for this address, process it normally */
	return true;

ignore_iv:
	/* skip the whole 32-byte vector */
	adev->irq.ih.rptr += 32;
	return false;
}
  265. /**
  266. * vega10_ih_decode_iv - decode an interrupt vector
  267. *
  268. * @adev: amdgpu_device pointer
  269. *
  270. * Decodes the interrupt vector at the current rptr
  271. * position and also advance the position.
  272. */
  273. static void vega10_ih_decode_iv(struct amdgpu_device *adev,
  274. struct amdgpu_iv_entry *entry)
  275. {
  276. /* wptr/rptr are in bytes! */
  277. u32 ring_index = adev->irq.ih.rptr >> 2;
  278. uint32_t dw[8];
  279. dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
  280. dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
  281. dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
  282. dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
  283. dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
  284. dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
  285. dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]);
  286. dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]);
  287. entry->client_id = dw[0] & 0xff;
  288. entry->src_id = (dw[0] >> 8) & 0xff;
  289. entry->ring_id = (dw[0] >> 16) & 0xff;
  290. entry->vm_id = (dw[0] >> 24) & 0xf;
  291. entry->vm_id_src = (dw[0] >> 31);
  292. entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
  293. entry->timestamp_src = dw[2] >> 31;
  294. entry->pas_id = dw[3] & 0xffff;
  295. entry->pasid_src = dw[3] >> 31;
  296. entry->src_data[0] = dw[4];
  297. entry->src_data[1] = dw[5];
  298. entry->src_data[2] = dw[6];
  299. entry->src_data[3] = dw[7];
  300. /* wptr/rptr are in bytes! */
  301. adev->irq.ih.rptr += 32;
  302. }
  303. /**
  304. * vega10_ih_set_rptr - set the IH ring buffer rptr
  305. *
  306. * @adev: amdgpu_device pointer
  307. *
  308. * Set the IH ring buffer rptr.
  309. */
  310. static void vega10_ih_set_rptr(struct amdgpu_device *adev)
  311. {
  312. if (adev->irq.ih.use_doorbell) {
  313. /* XXX check if swapping is necessary on BE */
  314. if (adev->irq.ih.use_bus_addr)
  315. adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
  316. else
  317. adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
  318. WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
  319. } else {
  320. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), adev->irq.ih.rptr);
  321. }
  322. }
  323. static int vega10_ih_early_init(void *handle)
  324. {
  325. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  326. vega10_ih_set_interrupt_funcs(adev);
  327. return 0;
  328. }
  329. static int vega10_ih_sw_init(void *handle)
  330. {
  331. int r;
  332. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  333. r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
  334. if (r)
  335. return r;
  336. adev->irq.ih.use_doorbell = true;
  337. adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
  338. adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
  339. if (!adev->irq.ih.faults)
  340. return -ENOMEM;
  341. INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
  342. AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
  343. spin_lock_init(&adev->irq.ih.faults->lock);
  344. adev->irq.ih.faults->count = 0;
  345. r = amdgpu_irq_init(adev);
  346. return r;
  347. }
  348. static int vega10_ih_sw_fini(void *handle)
  349. {
  350. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  351. amdgpu_irq_fini(adev);
  352. amdgpu_ih_ring_fini(adev);
  353. kfree(adev->irq.ih.faults);
  354. adev->irq.ih.faults = NULL;
  355. return 0;
  356. }
  357. static int vega10_ih_hw_init(void *handle)
  358. {
  359. int r;
  360. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  361. r = vega10_ih_irq_init(adev);
  362. if (r)
  363. return r;
  364. return 0;
  365. }
  366. static int vega10_ih_hw_fini(void *handle)
  367. {
  368. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  369. vega10_ih_irq_disable(adev);
  370. return 0;
  371. }
  372. static int vega10_ih_suspend(void *handle)
  373. {
  374. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  375. return vega10_ih_hw_fini(adev);
  376. }
  377. static int vega10_ih_resume(void *handle)
  378. {
  379. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  380. return vega10_ih_hw_init(adev);
  381. }
  382. static bool vega10_ih_is_idle(void *handle)
  383. {
  384. /* todo */
  385. return true;
  386. }
  387. static int vega10_ih_wait_for_idle(void *handle)
  388. {
  389. /* todo */
  390. return -ETIMEDOUT;
  391. }
  392. static int vega10_ih_soft_reset(void *handle)
  393. {
  394. /* todo */
  395. return 0;
  396. }
  397. static int vega10_ih_set_clockgating_state(void *handle,
  398. enum amd_clockgating_state state)
  399. {
  400. return 0;
  401. }
  402. static int vega10_ih_set_powergating_state(void *handle,
  403. enum amd_powergating_state state)
  404. {
  405. return 0;
  406. }
/* Common IP-block callbacks for the Vega10 IH. Idle/reset handling is
 * still todo and the gating callbacks are no-ops (see stubs above).
 */
const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};
/* IH ring callbacks; installed on adev->irq.ih_funcs by
 * vega10_ih_set_interrupt_funcs().
 */
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.prescreen_iv = vega10_ih_prescreen_iv,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};
  429. static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
  430. {
  431. if (adev->irq.ih_funcs == NULL)
  432. adev->irq.ih_funcs = &vega10_ih_funcs;
  433. }
/* IP block descriptor for the IH: version 4.0.0 (matches the included
 * osssys_4_0 register headers).
 */
const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};