amdgpu_ring.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
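
/*
 * Illustrative sketch only (not taken from this file): the usual
 * submission pattern is to reserve space, write packets and commit,
 * which bumps the wptr so the GPU starts fetching.  The dword count
 * and the packet written below are placeholders.
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->nop);
 *	amdgpu_ring_commit(ring);
 *
 * If something goes wrong between alloc and commit, amdgpu_ring_undo()
 * resets the driver's copy of the wptr so nothing new is exposed to
 * the GPU.
 */
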
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
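	/* e.g. (illustrative numbers, not from the driver): with
	 * align_mask = 0xf a request for 10 dwords is rounded up to 16 */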
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->align_mask)
		ib->ptr[ib->length_dw++] = ring->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
	count %= ring->align_mask + 1;
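	/* e.g. (illustrative numbers): with align_mask = 0xf and
	 * wptr & 0xf == 5 this emits (16 - 5) % 16 = 11 NOPs; an already
	 * aligned wptr gets 0 NOPs
	 */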
	ring->funcs->insert_nop(ring, count);

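	/* order the ring buffer writes above against the wptr update below */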
	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the saved ring contents
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
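
	/* number of dwords between the last rptr the GPU reported back and
	 * the current wptr, computed modulo the ring size
	 */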
	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_alloc(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		amdgpu_ring_write(ring, data[i]);
	}

	amdgpu_ring_commit(ring);
	kfree(data);
	return 0;
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring
 * @nop: nop packet for this ring
 * @align_mask: alignment mask used when padding the ring and IBs
 * @irq_src: interrupt source used by the fence driver for this ring
 * @irq_type: interrupt type within @irq_src
 * @ring_type: type of ring (gfx, compute, sdma, uvd, vce)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	u32 rb_bufsz;
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring,
			amdgpu_sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];

	spin_lock_init(&ring->fence_lock);
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
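	/* e.g. (illustrative numbers): a request of 100000 bytes gives
	 * rb_bufsz = order_base_2(12500) = 14 and a final ring_size of
	 * (1 << 15) * 4 = 128 KB; power-of-two requests are kept as is
	 */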
	ring->ring_size = ring_size;
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
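	/* ptr_mask wraps dword indices (the ring size is a power of two);
	 * max_dw caps a single submission to its share of the ring, assuming
	 * up to amdgpu_sched_hw_submission submissions can be in flight
	 */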
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
				    amdgpu_sched_hw_submission);
	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = *(int *)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t *)adev) + roffset);
	uint32_t rptr, wptr, rptr_next;
	unsigned i;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);

	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);

	if (!ring->ready)
		return 0;

	/* print 32 dw before the current rptr as often it's the last executed
	 * packet that is the root issue
	 */
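	/* in the dump below '*' marks the entry at the current rptr and '#'
	 * the entry recorded in the next_rptr writeback
	 */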
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	while (i != rptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	while (i != wptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

/* TODO: clean this up! */
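/* each entry below stores the byte offset of one ring inside struct
 * amdgpu_device; amdgpu_debugfs_ring_info() and amdgpu_debugfs_ring_init()
 * add that offset back to the device pointer to recover the ring
 */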
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);

static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
		int roffset = *(int *)amdgpu_debugfs_ring_info_list[i].data;
		struct amdgpu_ring *other = (void *)(((uint8_t *)adev) + roffset);
		unsigned r;

		if (other != ring)
			continue;

		r = amdgpu_debugfs_add_files(adev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}