nbio_v6_1.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "vega10_enum.h"
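
/* SMN addresses of registers accessed indirectly through the PCIE
 * index/data pair (RREG32_PCIE()/WREG32_PCIE() below).
 */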
#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070
#define smnPCIE_CONFIG_CNTL	0x11180044
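
/* Extract the ATI revision ID from the device 0 function 0 strap register */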
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}
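
/* Enable or disable framebuffer (VRAM) read/write access through the BIF */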
static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
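
/* Trigger an HDP memory coherency flush; the write deliberately bypasses
 * the KIQ so it also works under SR-IOV.
 */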
static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
	WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
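
/* VRAM size as reported by the RCC config register (presumably in MB) */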
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}
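
/* Program the doorbell range for an SDMA instance.  Both instances share
 * the BIF_SDMA0_DOORBELL_RANGE field layout, so the SDMA0 field names are
 * reused when writing the SDMA1 register.
 */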
static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
				  SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
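
/* Toggle the BIF doorbell aperture for the physical function */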
static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
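
/* Set up the self-ring doorbell aperture, which presumably lets the chip
 * route doorbell writes back to itself: point the aperture at the doorbell
 * BAR base and enable it, or clear the control register to disable it.
 */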
static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
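
/* Program the doorbell range used by the interrupt handler (IH) ring */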
static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
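
/* Basic IH setup: program the dummy-read page address and leave the
 * dummy-read override and non-snoop request bits cleared, since the IH
 * ring lives in snooped (cacheable) system memory.
 */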
static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
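
/* Enable or disable BIF medium grain clock gating (MGCG) by toggling the
 * clock gate enable bits in CPM_CONTROL; the register is only written back
 * if its value actually changed.
 */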
static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}
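
/* Enable or disable PCIe memory light sleep (BIF_LS) via PCIE_CNTL2 */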
static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}
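
/* Report which BIF clockgating features are currently active, sampling
 * one representative enable bit per feature.
 */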
static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}
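
/* Register offset helpers consumed by the common amdgpu NBIO code */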
static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
}

static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
}
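
/* Per-client reference masks for the GPU_HDP_FLUSH_REQ/DONE handshake */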
static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
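
/* Detect the virtualization mode from the IOV function identifier:
 * bit 0 marks a virtual function, bit 31 indicates IOV is enabled, and
 * an all-zero register while running in a VM implies passthrough mode.
 */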
static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
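
/* One-time register init; this appears to override the SWUS maximum read
 * request size via PCIE_CONFIG_CNTL, writing only when the value changes.
 */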
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}
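
/* Callback table exported to the common amdgpu NBIO layer */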
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
	.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
	.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
	.get_rev_id = nbio_v6_1_get_rev_id,
	.mc_access_enable = nbio_v6_1_mc_access_enable,
	.hdp_flush = nbio_v6_1_hdp_flush,
	.get_memsize = nbio_v6_1_get_memsize,
	.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
	.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v6_1_get_clockgating_state,
	.ih_control = nbio_v6_1_ih_control,
	.init_registers = nbio_v6_1_init_registers,
	.detect_hw_virt = nbio_v6_1_detect_hw_virt,
};