vega10_ih.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

#include "soc15_common.h"
#include "vega10_ih.h"

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts, program the interrupt controller's ring buffer
 * registers, then enable the IH ring buffer and interrupts (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	int ret = 0;
	int rb_bufsz;
	u32 ih_rb_cntl, ih_doorbell_rtpr;
	u32 tmp;
	u64 wptr_off;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	adev->nbio_funcs->ih_control(adev);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	if (adev->irq.ih.use_bus_addr) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
	}
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	if (adev->irq.msi_enabled)
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);

	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	if (adev->irq.ih.use_bus_addr)
		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
	else
		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 OFFSET, adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	vega10_ih_enable_interrupts(adev);

	return ret;
}
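
/*
 * Summary of the IH_RB_CNTL programming above: RB_SIZE holds the log2 of the
 * ring size in dwords (order_base_2(ring_size / 4)), WPTR_WRITEBACK_ENABLE
 * lets the current write pointer be read from memory instead of an MMIO
 * register, and WPTR_OVERFLOW_ENABLE makes the hardware flag ring overruns,
 * which vega10_ih_get_wptr() below checks for and recovers from.
 */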

/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	if (adev->irq.ih.use_bus_addr)
		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
	else
		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last vector that was not overwritten (wptr + 32).
		 * Hopefully this allows us to catch up.
		 */
		tmp = (wptr + 32) & adev->irq.ih.ptr_mask;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, tmp);
		adev->irq.ih.rptr = tmp;

		tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}
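
/*
 * The wptr returned above is a byte offset into the IV ring (each entry is
 * 32 bytes). The generic IH processing loop (amdgpu_ih_process() in
 * amdgpu_ih.c) keeps invoking the prescreen and decode callbacks below until
 * the ring's rptr catches up with this wptr.
 */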

/**
 * vega10_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	u32 dw0, dw3, dw4, dw5;
	u16 pasid;
	u64 addr, key;
	struct amdgpu_vm *vm;
	int r;

	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);

	/* Filter retry page faults, let only the first one pass. If
	 * there are too many outstanding faults, ignore them until
	 * some faults get cleared.
	 */
	switch (dw0 & 0xff) {
	case AMDGPU_IH_CLIENTID_VMC:
	case AMDGPU_IH_CLIENTID_UTCL2:
		break;
	default:
		/* Not a VM fault */
		return true;
	}

	pasid = dw3 & 0xffff;
	/* No PASID, can't identify faulting process */
	if (!pasid)
		return true;

	/* Not a retry fault, check fault credit */
	if (!(dw5 & 0x80)) {
		if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
			goto ignore_iv;
		return true;
	}

	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
	key = AMDGPU_VM_FAULT(pasid, addr);
	r = amdgpu_ih_add_fault(adev, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0)
		goto ignore_iv;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, process it normally */
		spin_unlock(&adev->vm_manager.pasid_lock);
		amdgpu_ih_clear_fault(adev, key);
		return true;
	}
	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		spin_unlock(&adev->vm_manager.pasid_lock);
		amdgpu_ih_clear_fault(adev, key);
		goto ignore_iv;
	}
	spin_unlock(&adev->vm_manager.pasid_lock);

	/* It's the first fault for this address, process it normally */
	return true;

ignore_iv:
	adev->irq.ih.rptr += 32;
	return false;
}
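
/*
 * Layout of a 32-byte (8-dword) Vega10 IV ring entry, as consumed by
 * vega10_ih_prescreen_iv() above and decoded in vega10_ih_decode_iv() below:
 *   dw0: client_id[7:0], src_id[15:8], ring_id[23:16], vmid[27:24], vmid_src[31]
 *   dw1: timestamp[31:0]
 *   dw2: timestamp[47:32] in bits [15:0], timestamp_src in bit [31]
 *   dw3: pasid[15:0], pasid_src[31]
 *   dw4-dw7: src_data[0]..src_data[3]
 */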

/**
 * vega10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[8];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
	dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]);
	dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	adev->irq.ih.rptr += 32;
}

/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 *
 * Set the IH ring buffer rptr.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev)
{
	if (adev->irq.ih.use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		if (adev->irq.ih.use_bus_addr)
			adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		else
			adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
		WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr);
	}
}

static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	return 0;
}

static int vega10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;

	adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
	if (!adev->irq.ih.faults)
		return -ENOMEM;
	INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
			 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spin_lock_init(&adev->irq.ih.faults->lock);
	adev->irq.ih.faults->count = 0;

	r = amdgpu_irq_init(adev);

	return r;
}

static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	kfree(adev->irq.ih.faults);
	adev->irq.ih.faults = NULL;

	return 0;
}

static int vega10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vega10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}

static int vega10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_fini(adev);
}

static int vega10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_init(adev);
}

static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int vega10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static int vega10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int vega10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.prescreen_iv = vega10_ih_prescreen_iv,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &vega10_ih_funcs;
}

const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};