amdgpu_virt.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /* By now all MMIO pages except mailbox are blocked */
        /* if blocking is enabled in hypervisor. Choose the */
        /* SCRATCH_REG0 to test. */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}
void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->virt.csa_obj,
                              &adev->virt.csa_vmid0_addr,
                              NULL);
}
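
/*
 * Illustrative sketch (not part of the original file): under SR-IOV the
 * static CSA is typically allocated once while the driver sets up and freed
 * on teardown. The hook names below are placeholders chosen for the example;
 * amdgpu_sriov_vf() is the VF-detection helper from amdgpu_virt.h.
 */
static int example_csa_sw_init(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev)) {
                int r = amdgpu_allocate_static_csa(adev);

                if (r)
                        return r;
        }
        return 0;
}

static void example_csa_sw_fini(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                amdgpu_free_static_csa(adev);
}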
/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init;
 * it maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and each GFX command submission should use this virtual
 * address within its META_DATA init package to support SRIOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo_va **bo_va)
{
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
        }

        *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}
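
/*
 * Illustrative sketch (not part of the original file): per the comment above,
 * a VF driver maps the static CSA into each newly initialized VM and keeps
 * the returned bo_va for later teardown. "vm" and "csa_va" stand in for
 * whatever per-client state the caller actually owns; the function name is a
 * placeholder.
 */
static int example_map_csa_for_client(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va **csa_va)
{
        if (!amdgpu_sriov_vf(adev))
                return 0;

        /* vm must already have gone through amdgpu_vm_init() */
        return amdgpu_map_static_csa(adev, vm, csa_va);
}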
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r;
        unsigned long flags;
        uint32_t val, seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        if (r < 1) {
                DRM_ERROR("wait for kiq fence error: %ld\n", r);
                return ~0;
        }
        val = adev->wb.wb[adev->virt.reg_val_offs];

        return val;
}
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        if (r < 1)
                DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
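
/*
 * Illustrative sketch (not part of the original file): while the VF is in
 * "runtime" mode (AMDGPU_SRIOV_CAPS_RUNTIME set, i.e. full GPU access has
 * been handed back to the hypervisor), direct MMIO is not allowed, so
 * register reads and writes are expected to go through the KIQ helpers above.
 * The wrapper below only demonstrates that dispatch, assuming the
 * amdgpu_sriov_runtime() helper from amdgpu_virt.h; the real accessors live
 * in the core device code.
 */
static uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        if (amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);

        return RREG32_NO_KIQ(reg);
}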
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, first need to request full gpu access.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, need to release full gpu access.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}
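
/*
 * Illustrative sketch (not part of the original file): under SR-IOV, hardware
 * init and teardown are bracketed by exclusive-access requests so the VF owns
 * the GPU while it programs registers, then hands it back. The function name
 * is a placeholder for wherever the driver runs its init sequence.
 */
static int example_hw_init_sriov(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_virt_request_full_gpu(adev, true);
        if (r)
                return r;

        /* ... program the hardware while full access is held ... */

        return amdgpu_virt_release_full_gpu(adev, true);
}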
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send reset command to GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * MM table is used by UVD and VCE for their initialization.
 * Return: Zero on successful allocation, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}
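
/*
 * Illustrative sketch (not part of the original file): a multimedia IP block
 * (UVD or VCE) running on a VF would allocate the MM table during its
 * sw_init-style setup and free it again in the matching teardown path. The
 * function names are placeholders for this example.
 */
static int example_mm_sw_init(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev))
                return 0;

        return amdgpu_virt_alloc_mm_table(adev);
}

static void example_mm_sw_fini(struct amdgpu_device *adev)
{
        amdgpu_virt_free_mm_table(adev);
}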
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}
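
/*
 * Note on the checksum scheme (added commentary): the result is the key plus
 * the byte-wise sum of the buffer, minus the bytes of the checksum value
 * passed in. The writer computes it with chksum == 0 while the checksum field
 * in the buffer is still zeroed and then stores the result; the reader passes
 * the stored value back in, so the checksum's own bytes cancel out of the sum
 * and an unmodified buffer reproduces exactly the stored value, which is what
 * amdgpu_virt_init_data_exchange() below checks for.
 */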
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amdgim_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* pf2vf message must be in 4K */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}