/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
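/*
 * A minimal sketch of the typical in-kernel flow, close to what the per-IP
 * ring tests do (error handling omitted; the single NOP packet below is a
 * placeholder for real commands):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);	// carve space from the SA pool
 *	ib.ptr[0] = ring->funcs->nop;			// fill in command packets
 *	ib.length_dw = 1;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);	// put a pointer to it on the ring
 *	r = dma_fence_wait(f, false);			// wait for the GPU to execute it
 *	amdgpu_ib_free(adev, &ib, f);			// memory is reclaimed once f signals
 *	dma_fence_put(f);
 */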
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB is associated with
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence that must signal before the IB's backing memory may be reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->fence_ctx;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
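	/* A pipeline sync is needed when the scheduler left a dependency
	 * fence to the hardware (sched_sync) or when the VM update requires
	 * one; it is emitted as part of amdgpu_vm_flush() below so that the
	 * previous work drains from the pipeline first.
	 */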
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);
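	/* Flush the HDP cache so CPU writes that went through the PCIe BAR
	 * are visible to the GPU before the IBs execute; on x86-64 APUs
	 * memory access is coherent and the flush can be skipped.
	 */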
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	skip_preamble = ring->current_ctx == fence_ctx;
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;

		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, the preamble CE IB must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	/* wrap the last IB with a user fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
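/*
 * For reference, the frame built by amdgpu_ib_schedule() lays out roughly
 * as follows on the ring (a sketch; which entries actually appear depends
 * on the ring type, the job and the ASIC):
 *
 *	[insert_start] [pipeline sync + VM flush] [cond_exec] [HDP flush]
 *	[cntxcntl] [preamble/CE IB] [DE IB(s)] [TMZ] [HDP invalidate]
 *	[fence] [insert_end] [user fence] [switch buffer]
 *
 * The cond_exec packet is patched retroactively via patch_offset once the
 * size of the whole frame is known.
 */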
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
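	/* a single GTT buffer of AMDGPU_IB_POOL_SIZE * 64 KiB backs all IBs;
	 * amdgpu_ib_get() then carves 256 byte aligned chunks out of it via
	 * the suballocator
	 */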
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	return 0;
}
/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF.
		 * The IB test timeout for MM engines under SR-IOV therefore
		 * has to be long; 8 seconds should be enough for the MM
		 * engine to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout has to be wide enough to cover the time spent
		 * waiting for them to come back; this applies under runtime
		 * mode only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		if (!ring || !ring->ready)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
				ret = r;
			}
		}
	}
	return ret;
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}