amdgpu_amdkfd_gfx_v9.c

/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"

/* HACK: MMHUB and GC both have VM-related registers with the same
 * names but different offsets. Define the MMHUB registers we need here
 * with a prefix. A proper solution would be to move the functions
 * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
 * respectively.
 */
#define mmMMHUB_VM_INVALIDATE_ENG16_REQ				0x06f3
#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX		0
#define mmMMHUB_VM_INVALIDATE_ENG16_ACK				0x0705
#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX		0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32		0x072b
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX	0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32		0x072c
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX	0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32		0x074b
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX	0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32		0x074c
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX	0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32		0x076b
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX	0
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32		0x076c
#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX	0
#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32		0x0727
#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX	0
#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32		0x0728
#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX	0

#define V9_PIPE_PER_MEC		(4)
#define V9_QUEUES_PER_PIPE_MEC	(8)

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because REG_GET_FIELD() is used, this function is kept in the
 * ASIC-specific file.
 */
static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}
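
/* Interface table handed to the KFD driver through
 * amdgpu_amdkfd_gfx_9_0_get_functions() below. The kgd_* entries are
 * the GFXv9-specific implementations in this file; the remaining
 * entries point at shared amdgpu_amdkfd helpers.
 */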
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_pasid_alloc,
	.free_pasid = amdgpu_pasid_free,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = amdgpu_amdkfd_get_tile_config,
	.get_cu_info = get_cu_info,
	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
	.gpu_recover = amdgpu_amdkfd_gpu_reset,
	.set_compute_idle = amdgpu_amdkfd_set_compute_idle,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
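
/* Queue-indexed CP registers are banked per (mec, pipe, queue, vmid).
 * lock_srbm() selects a bank under adev->srbm_mutex and unlock_srbm()
 * restores the default all-zero selection, so every register access
 * between the two calls is routed to the intended queue.
 */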
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
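
/* Map a (pipe, queue) pair to its bit in CP_PQ_WPTR_POLL_CNTL1, which
 * presumably carries one wptr-poll enable bit per queue slot. Slots
 * are numbered linearly, so with 8 queues per pipe, pipe 2 queue 3
 * lands on bit (2 * 8 + 3) = 19; the "& 31" keeps the index inside
 * one dword.
 */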
static uint32_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
			    queue_id) & 31;

	return ((uint32_t)1) << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * This needs to be done twice, once for GFX and once for MMHUB:
	 * for ATC, add 16 to the VMID to reach the MMHUB copy, since the
	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31; the
	 * IH block uses different registers for GFX and MM.
	 */
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);
	return 0;
}

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
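
/* SDMA per-queue (RLC) register blocks sit at a fixed stride: base[]
 * holds each engine's RLC0 block offset relative to the mmSDMA*_RLC0_*
 * constants, and every further queue adds (mmSDMA0_RLC1_RB_CNTL -
 * mmSDMA0_RLC0_RB_CNTL), so e.g. engine 1 queue 2 resolves to
 * base[1] + 2 * stride.
 */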
static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
	};
	uint32_t retval;

	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
					       mmSDMA0_RLC0_RB_CNTL);

	pr_debug("sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
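		/* Worked example of the reconstruction below: a
		 * QUEUE_SIZE field of 9 gives a 2 << 9 = 1024-dword
		 * ring. With rptr = 0x3e8 (ring offset 1000) and a
		 * saved wptr_lo of 0x1234410 (ring offset 0x10, wrap
		 * bits 0x1234400), offset 0x10 < 1000 means the wptr
		 * is one wrap ahead, so the guess becomes
		 * 1000 + 1024 + 0x1234400 = 0x1234be8: rptr's ring
		 * offset combined with wptr's wrap count, which makes
		 * WPTR == RPTR as required.
		 */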
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
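/* Each dump entry is a (register, value) pair; DUMP_REG stores the
 * dword offset shifted left by 2, i.e. the register's byte address.
 */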
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
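
/* Restoring an SDMA queue from its MQD: disable the ring buffer, wait
 * (up to 2 s) for the engine to report IDLE in CONTEXT_STATUS, then
 * reprogram doorbell, pointers and ring base before re-enabling the
 * ring. MINOR_PTR_UPDATE is presumably held at 1 around the wptr
 * restore so the engine latches the new pointers consistently.
 */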
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
	sdmax_gfx_context_cntl = m->sdma_engine_id ?
		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	data = RREG32(sdmax_gfx_context_cntl);
	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
			     RESUME_CTX, 0);
	WREG32(sdmax_gfx_context_cntl, data);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
	       m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
	       m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
	       m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
	       m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_base_addr + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
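
/* Preempt a compute queue: write the dequeue request type (see enum
 * hqd_dequeue_request_type) to CP_HQD_DEQUEUE_REQUEST, then poll
 * CP_HQD_ACTIVE until the CP marks the queue inactive or the caller's
 * timeout (utimeout, in ms) expires.
 */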
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	uint32_t req = (1 << vmid) |
		(0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;

	mutex_lock(&adev->srbm_mutex);

	/* Use legacy-mode TLB invalidation.
	 *
	 * Currently on Raven the code below is broken for anything but
	 * legacy mode due to a MMHUB power gating problem. A workaround
	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
	 * bit.
	 *
	 * TODO 1: agree on the right set of invalidation registers for
	 * KFD use. Use the last one for now. Invalidate both GC and
	 * MMHUB.
	 *
	 * TODO 2: support range-based invalidation, requires kfd2kgd
	 * interface change
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
				0xffffffff);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
				0x0000001f);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
				0xffffffff);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
				0x0000001f);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
				req);

	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
					(1 << vmid)))
		cpu_relax();

	while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
					(1 << vmid)))
		cpu_relax();

	mutex_unlock(&adev->srbm_mutex);
}
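
/* When the KIQ ring is available, TLB invalidation is handed to the CP
 * instead of done through MMIO: one PACKET3_INVALIDATE_TLBS packet
 * flushes the given PASID on all hubs (legacy flush type), and a
 * polled fence signals completion; the 12-dword ring allocation covers
 * both the invalidate and the fence packets.
 */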
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
{
	signed long r;
	uint32_t seq;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	spin_lock(&adev->gfx.kiq.ring_lock);
	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package */
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}

	return 0;
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	if (adev->in_gpu_reset)
		return -EIO;

	if (ring->ready)
		return invalidate_tlbs_with_kiq(adev, pasid);

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
				== pasid) {
				write_vmid_invalidate_request(kgd, vmid);
				break;
			}
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return 0;
	}

	write_vmid_invalidate_request(kgd, vmid);
	return 0;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	/* No longer needed on GFXv9. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
}

/* FIXME: Does this need to be ASIC-specific code? */
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}
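
/* The per-VMID page-table registers are laid out as consecutive
 * LO32/HI32 pairs, hence the (vmid*2) dword offset from the CONTEXT0
 * register. The base and address range are programmed identically
 * into both the GC and MMHUB copies of the hub.
 */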
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint64_t base = page_table_base | AMDGPU_PTE_VALID;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* TODO: take advantage of per-process address space size. For
	 * now, all processes share the same address space size, like
	 * on GFX8 and older.
	 */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
			lower_32_bits(adev->vm_manager.max_pfn - 1));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
			upper_32_bits(adev->vm_manager.max_pfn - 1));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));

	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
			lower_32_bits(adev->vm_manager.max_pfn - 1));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
			upper_32_bits(adev->vm_manager.max_pfn - 1));

	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
}