amdgpu_amdkfd_gfx_v8.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"
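
/*
 * Values written to CP_HQD_DEQUEUE_REQUEST by kgd_hqd_destroy() to tell
 * the CP how to unmap a queue: do nothing, drain outstanding work through
 * the pipe, or reset the waves currently in flight.
 */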
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr,
		uint32_t wptr_shift, uint32_t wptr_mask,
		struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
		enum kfd_preempt_type reset_type,
		unsigned int utimeout, uint32_t pipe_id,
		uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
		uint64_t va, uint32_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}
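
/*
 * Dispatch table handed to the KFD through
 * amdgpu_amdkfd_gfx_8_0_get_functions(); the KFD calls back into these
 * helpers for all GFXv8 register-level operations.
 */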
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
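
/*
 * lock_srbm()/unlock_srbm() bracket accesses to banked CP registers:
 * SRBM_GFX_CNTL selects which MEC/pipe/queue/VMID instance subsequent
 * register reads and writes address, so the selection must be held stable
 * under srbm_mutex and cleared back to 0 afterwards.
 */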
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
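
/*
 * pipe_id is KFD-relative (counted across the MECs reserved for KFD);
 * convert it to an absolute MEC/pipe pair. The +1 reflects MEC numbering
 * in SRBM_GFX_CNTL starting at ME 1.
 */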
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
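
/*
 * Program the per-VMID shader memory configuration (default and APE1
 * aperture setup) for a process. All four registers are banked by VMID,
 * hence the lock_srbm() selection.
 */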
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}
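
/*
 * Enable the timestamp interrupt source on one of KFD's compute pipes;
 * CPC_INT_CNTL is banked per MEC/pipe, hence the SRBM selection.
 */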
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
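
/*
 * Load an HQD from its memory queue descriptor (MQD): optionally hook the
 * HIQ into the RLC scheduler, copy the MQD's register image into the HQD
 * in register order, enable the doorbell, restore the user write pointer
 * and finally set the queue active.
 */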
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	if (read_user_wptr(mm, wptr, wptr_val))
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	return 0;
}
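
/*
 * A queue slot counts as occupied when its HQD is active and the ring
 * base programmed in CP_HQD_PQ_BASE/BASE_HI matches queue_address
 * (compared in 256-byte units, matching the >> 8 below).
 */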
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
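
/*
 * Unmap an HQD: pick a dequeue request type from the requested preempt
 * mode, apply the IQ-timer workaround described below, issue the dequeue
 * request and then poll CP_HQD_ACTIVE until the queue goes idle or
 * utimeout (in ms) expires.
 */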
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
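
/*
 * Stop an SDMA RLC queue: clear RB_ENABLE, wait for the context to report
 * idle, then tear down the doorbell and ring base/pointers.
 */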
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* test the IDLE field of the context status register */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}
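
/*
 * ATC_VMID<n>_PASID_MAPPING holds the PASID mapped to VMID n in its low
 * bits plus a VALID flag in the top bit; the two helpers below report
 * each half of that register.
 */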
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}
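
/*
 * Issue an SQ_CMD (e.g. a wavefront halt or kill) to the shader engines
 * selected by gfx_index_val, then restore GRBM_GFX_INDEX to broadcast so
 * later register writes reach all SEs/SHs/instances again.
 */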
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}
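
/*
 * Publish the scratch (private memory) backing VA for a VMID;
 * SH_HIDDEN_PRIVATE_BASE_VMID is banked per VMID, hence the SRBM
 * selection.
 */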
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
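
/*
 * Report the loaded microcode version for one engine by reading the
 * common header of the firmware image the driver fetched at init time.
 */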
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use */
	return hdr->common.ucode_version;
}