vega10_ih.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

#include "soc15_common.h"
#include "vega10_ih.h"

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}
/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}
/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	int ret = 0;
	int rb_bufsz;
	u32 ih_rb_cntl, ih_doorbell_rtpr;
	u32 tmp;
	u64 wptr_off;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_ih_control(adev);
	else
		nbio_v6_1_ih_control(adev);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	if (adev->irq.ih.use_bus_addr) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
	}
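
	/* RB_SIZE is the log2 of the ring size in dwords; ring_size is in
	 * bytes, hence the division by 4 below.
	 */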
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
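
	/* When MSIs are in use, have writes to IH_RB_RPTR re-arm the
	 * interrupt (RPTR_REARM).
	 */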
	if (adev->irq.msi_enabled)
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);

	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	if (adev->irq.ih.use_bus_addr)
		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
	else
		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 OFFSET, adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
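
	/* Tell the NBIO block which doorbell (if any) belongs to the IH so
	 * that rptr doorbell writes reach it.
	 */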
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
	else
		nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
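
	/* Mark client 18 as a storm client and enable interrupt flood control. */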
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	vega10_ih_enable_interrupts(adev);

	return ret;
}
/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}
/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	if (adev->irq.ih.use_bus_addr)
		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
	else
		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 32). Hopefully
		 * this should allow us to catch up.
		 */
		tmp = (wptr + 32) & adev->irq.ih.ptr_mask;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, tmp);
		adev->irq.ih.rptr = tmp;
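
		/* Acknowledge the overflow in hardware as well by writing
		 * WPTR_OVERFLOW_CLEAR back to IH_RB_CNTL.
		 */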
		tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
	}

	return (wptr & adev->irq.ih.ptr_mask);
}
/**
 * vega10_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	u32 dw0, dw3, dw4, dw5;
	u16 pasid;
	u64 addr, key;
	struct amdgpu_vm *vm;
	int r;

	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);

	/* Filter retry page faults, let only the first one pass. If
	 * there are too many outstanding faults, ignore them until
	 * some faults get cleared.
	 */
	switch (dw0 & 0xff) {
	case AMDGPU_IH_CLIENTID_VMC:
	case AMDGPU_IH_CLIENTID_UTCL2:
		break;
	default:
		/* Not a VM fault */
		return true;
	}

	pasid = dw3 & 0xffff;
	/* No PASID, can't identify faulting process */
	if (!pasid)
		return true;

	/* Not a retry fault, check fault credit */
	if (!(dw5 & 0x80)) {
		if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
			goto ignore_iv;
		return true;
	}
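
	/* Rebuild the faulting page address: dw5[3:0] carries bits 47:44 and
	 * dw4 carries the page frame (bits 43:12).
	 */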
	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
	key = AMDGPU_VM_FAULT(pasid, addr);
	r = amdgpu_ih_add_fault(adev, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0)
		goto ignore_iv;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (WARN_ON_ONCE(!vm)) {
		/* VM not found, process it normally */
		amdgpu_ih_clear_fault(adev, key);
		return true;
	}

	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		amdgpu_ih_clear_fault(adev, key);
		goto ignore_iv;
	}

	/* It's the first fault for this address, process it normally */
	return true;

ignore_iv:
	adev->irq.ih.rptr += 32;
	return false;
}
/**
 * vega10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @entry: IV entry to fill in
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[8];
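
	/* An IV entry is 8 dwords (32 bytes); read the whole entry before
	 * decoding its fields.
	 */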
	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
	dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]);
	dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vm_id = (dw[0] >> 24) & 0xf;
	entry->vm_id_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pas_id = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	adev->irq.ih.rptr += 32;
}
/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 *
 * Set the IH ring buffer rptr.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev)
{
	if (adev->irq.ih.use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		if (adev->irq.ih.use_bus_addr)
			adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		else
			adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr);
	}
}
static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	return 0;
}
static int vega10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
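	/* AMDGPU_DOORBELL64_IH is an index into the 64-bit doorbell layout;
	 * shift by one to get the equivalent 32-bit doorbell slot.
	 */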
	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;

	adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
	if (!adev->irq.ih.faults)
		return -ENOMEM;
	INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
			 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spin_lock_init(&adev->irq.ih.faults->lock);
	adev->irq.ih.faults->count = 0;

	r = amdgpu_irq_init(adev);

	return r;
}
static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	kfree(adev->irq.ih.faults);
	adev->irq.ih.faults = NULL;

	return 0;
}
static int vega10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vega10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}
static int vega10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_fini(adev);
}

static int vega10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_init(adev);
}
static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int vega10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static int vega10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int vega10_ih_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.prescreen_iv = vega10_ih_prescreen_iv,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &vega10_ih_funcs;
}

const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};