amdgpu_virt.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				&adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should use this virtual
 * address in its META_DATA init package to support SR-IOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
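
/*
 * Example (illustrative only): how the two CSA helpers above might be
 * paired by a caller. amdgpu_allocate_static_csa() runs once at device
 * init; amdgpu_map_static_csa() runs per VM, from amdgpu_vm_init().
 * The function name example_setup_csa() is hypothetical.
 *
 *	static int example_setup_csa(struct amdgpu_device *adev,
 *				     struct amdgpu_vm *vm)
 *	{
 *		struct amdgpu_bo_va *bo_va;
 *		int r;
 *
 *		r = amdgpu_allocate_static_csa(adev);
 *		if (r)
 *			return r;
 *
 *		return amdgpu_map_static_csa(adev, vm, &bo_va);
 *	}
 */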

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	/* emit the register read plus a polling fence on the KIQ ring */
	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	/* the KIQ writes the register value into a writeback slot */
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
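
/*
 * Example (illustrative only): while the host owns the GPU at runtime
 * (AMDGPU_SRIOV_CAPS_RUNTIME set), direct MMIO access to most registers
 * is unavailable, so a VF routes register access through the KIQ helpers
 * above. The wrapper name example_rreg() is hypothetical; the real driver
 * does this inside its RREG32()/WREG32() paths.
 *
 *	static uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg)
 *	{
 *		if (amdgpu_sriov_runtime(adev))
 *			return amdgpu_virt_kiq_rreg(adev, reg);
 *
 *		return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *	}
 */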

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, full gpu access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
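
/*
 * Example (illustrative only): the request/release pair brackets any
 * phase that needs exclusive hardware access under SR-IOV, such as
 * driver init. The function name example_init_hw() is hypothetical.
 *
 *	static int example_init_hw(struct amdgpu_device *adev)
 *	{
 *		int r;
 *
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *
 *		... program the hardware while exclusive access is held ...
 *
 *		return amdgpu_virt_release_full_gpu(adev, true);
 *	}
 */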

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
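
/*
 * Example (illustrative only): the MM table is allocated before UVD/VCE
 * ring setup and freed on teardown; both helpers are no-ops on bare
 * metal since they check amdgpu_sriov_vf(). The function names below
 * are hypothetical.
 *
 *	static int example_vce_sw_init(struct amdgpu_device *adev)
 *	{
 *		return amdgpu_virt_alloc_mm_table(adev);
 *	}
 *
 *	static void example_vce_sw_fini(struct amdgpu_device *adev)
 *	{
 *		amdgpu_virt_free_mm_table(adev);
 *	}
 */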

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum: key plus the byte sum of the object */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the bytes of the stored checksum itself, so the
	 * checksum field does not contribute to its own value */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
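
/*
 * Worked example (illustrative only): for a 4-byte object with bytes
 * {0x01, 0x02, 0x03, 0x04}, key 0x10 and chksum 0, the result is
 * 0x10 + 0x01 + 0x02 + 0x03 + 0x04 = 0x1a. To validate a received
 * block, pass the stored checksum as @chksum: its bytes are subtracted
 * back out, so the block is consistent exactly when the return value
 * equals the stored checksum (see amdgpu_virt_init_data_exchange()
 * below).
 */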

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
				       sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
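
/*
 * Layout sketch (illustrative only) of the firmware-reserved VRAM region,
 * reconstructed from the reads and writes in
 * amdgpu_virt_init_data_exchange() above:
 *
 *	fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET:
 *		+------------------+ <- p_pf2vf (written by the host/GIM)
 *		| pf2vf info       |    header.size bytes, must be < 4K,
 *		| (incl. checksum) |    validated against checksum_key
 *		+------------------+ <- p_vf2pf = p_pf2vf + pf2vf_size
 *		| vf2pf info       |    written by the guest driver:
 *		| (incl. checksum) |    version, size, driver_version, ...
 *		+------------------+
 */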