/* amdgpu_gmc.h */
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
#ifndef __AMDGPU_GMC_H__
#define __AMDGPU_GMC_H__

#include <linux/types.h>

#include "amdgpu_irq.h"

/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL

/*
 * Hardware is programmed as if the hole doesn't exists with start and end
 * address values.
 *
 * This mask is used to remove the upper 16bits of the VA and so come up with
 * the linear addr value.
 */
#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL

/* forward declaration; only a pointer to it is stored in amdgpu_gmc */
struct firmware;
/*
 * VMHUB structures, functions & helpers
 */
struct amdgpu_vmhub {
	/* NOTE(review): from the names these look like per-hub mmio register
	 * offsets, filled in by the IP-specific gmc code — confirm against
	 * the gmc_v*.c users. */
	uint32_t	ctx0_ptb_addr_lo32;	/* context0 page table base, low 32 bits */
	uint32_t	ctx0_ptb_addr_hi32;	/* context0 page table base, high 32 bits */
	uint32_t	vm_inv_eng0_req;	/* VM invalidation engine 0 request */
	uint32_t	vm_inv_eng0_ack;	/* VM invalidation engine 0 ack */
	uint32_t	vm_context0_cntl;
	uint32_t	vm_l2_pro_fault_status;	/* L2 protection fault status */
	uint32_t	vm_l2_pro_fault_cntl;	/* L2 protection fault control */
};
/*
 * GPU MC structures, functions & helpers
 *
 * Per-ASIC callback table; invoked through the amdgpu_gmc_* wrapper
 * macros below.
 */
struct amdgpu_gmc_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* flush the vm tlb via ring */
	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
				       uint64_t pd_addr);
	/* Change the VMID -> PASID mapping */
	void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
				   unsigned pasid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint64_t flags); /* access flags */
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* set pte flags based per asic */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
	/* get the pde for a given mc addr */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
};
/* XGMI topology info; one instance per device, linked into a per-hive list */
struct amdgpu_xgmi {
	/* from psp */
	u64 device_id;
	u64 hive_id;
	/* fixed per family */
	u64 node_segment_size;
	/* physical node (0-3) */
	unsigned physical_node_id;
	/* number of nodes (0-4) */
	unsigned num_physical_nodes;
	/* gpu list in the same hive */
	struct list_head head;
};
/* per-device GPU memory controller state */
struct amdgpu_gmc {
	/* FB aperture as seen by the CPU (PCI BAR) */
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	/* AGP aperture placement in the MC address space */
	u64			agp_size;
	u64			agp_start;
	u64			agp_end;
	/* GART aperture placement in the MC address space */
	u64			gart_size;
	u64			gart_start;
	u64			gart_end;
	/* VRAM placement in the MC address space */
	u64			vram_start;
	u64			vram_end;
	/* FB region , it's same as local vram region in single GPU, in XGMI
	 * configuration, this region covers all GPUs in the same hive ,
	 * each GPU in the hive has the same view of this FB region .
	 * GPU0's vram starts at offset (0 * segment size) ,
	 * GPU1 starts at offset (1 * segment size), etc.
	 */
	u64			fb_start;
	u64			fb_end;
	unsigned		vram_width;
	u64			real_vram_size;	/* actual VRAM size, see mc_vram_size note above */
	int			vram_mtrr;
	/* NOTE(review): presumably the mask of usable MC address bits — confirm */
	u64			mc_mask;
	const struct firmware	*fw;	/* MC firmware */
	uint32_t		fw_version;
	struct amdgpu_irq_src	vm_fault;	/* VM fault interrupt source */
	uint32_t		vram_type;
	uint32_t		srbm_soft_reset;
	bool			prt_warning;
	/* NOTE(review): looks like memory reserved by the VBIOS/firmware — confirm */
	uint64_t		stolen_size;
	/* apertures */
	u64			shared_aperture_start;
	u64			shared_aperture_end;
	u64			private_aperture_start;
	u64			private_aperture_end;
	/* protects concurrent invalidation */
	spinlock_t		invalidate_lock;
	bool			translate_further;
	struct kfd_vm_fault_info	*vm_fault_info;
	atomic_t		vm_fault_info_updated;

	const struct amdgpu_gmc_funcs	*gmc_funcs;	/* ASIC-specific callbacks */

	struct amdgpu_xgmi	xgmi;
};
/* convenience wrappers dispatching to the per-ASIC gmc_funcs callbacks */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
  148. /**
  149. * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
  150. *
  151. * @adev: amdgpu_device pointer
  152. *
  153. * Returns:
  154. * True if full VRAM is visible through the BAR
  155. */
  156. static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
  157. {
  158. WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
  159. return (gmc->real_vram_size == gmc->visible_vram_size);
  160. }
  161. /**
  162. * amdgpu_gmc_sign_extend - sign extend the given gmc address
  163. *
  164. * @addr: address to extend
  165. */
  166. static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
  167. {
  168. if (addr >= AMDGPU_GMC_HOLE_START)
  169. addr |= AMDGPU_GMC_HOLE_END;
  170. return addr;
  171. }
/* shared gmc helpers, implemented outside this header */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
			      struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
			     struct amdgpu_gmc *mc);

#endif