si_dma.c

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
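
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * poll both async DMA rings with the check above. The helper name
 * si_any_dma_ring_hung() is hypothetical; the ring indices are the real
 * radeon ones (DMA0 and DMA1).
 */
static bool si_any_dma_ring_hung(struct radeon_device *rdev)
{
	return si_dma_is_lockup(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]) ||
	       si_dma_is_lockup(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}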

/**
 * si_dma_vm_set_page - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_page(struct radeon_device *rdev,
			struct radeon_ib *ib,
			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if (flags == R600_PTE_GART) {
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

		while (count) {
			unsigned bytes = count * 8;
			if (bytes > 0xFFFF8)
				bytes = 0xFFFF8;

			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
							      1, 0, 0, bytes);
			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
			ib->ptr[ib->length_dw++] = lower_32_bits(src);
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

			pe += bytes;
			src += bytes;
			count -= bytes / 8;
		}
	} else if (flags & R600_PTE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
				addr += incr;
				value |= flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			if (flags & R600_PTE_VALID)
				value = addr;
			else
				value = 0;

			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			ib->ptr[ib->length_dw++] = flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;

			pe += ndw * 4;
			addr += (ndw / 2) * incr;
			count -= ndw / 2;
		}
	}
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
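
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * size an indirect buffer for the system-page write path above. Each chunk
 * emits a 3-dword header (packet, pe low, pe high) plus 2 payload dwords
 * per page table entry, with the payload clamped to 0xFFFFE dwords, and
 * the IB is padded with NOPs to an 8-dword boundary. The helper name
 * si_dma_vm_set_page_ndw() is hypothetical, and it assumes the IB starts
 * empty. min() and ALIGN() are the standard kernel macros.
 */
static unsigned si_dma_vm_set_page_ndw(unsigned count)
{
	unsigned ndw = 0;

	while (count) {
		unsigned chunk = min(count * 2, 0xFFFFEu);

		ndw += 3 + chunk;	/* header + payload dwords */
		count -= chunk / 2;
	}
	return ALIGN(ndw, 8);		/* NOP padding to an 8-dword boundary */
}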

/**
 * si_dma_vm_flush - flush the VM page table cache using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit the flush on
 * @vm: radeon_vm pointer
 *
 * Update the page table base address for the requested VM id, flush the
 * HDP cache, and invalidate the TLB for the VM context (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm->id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id);
}
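
/*
 * Illustrative sketch, not part of the original file: the flush above
 * repeats one SRBM register-write pattern three times. Each write on the
 * async DMA ring is three dwords - the SRBM_WRITE packet header, the byte
 * enables (0xf) plus the register dword offset, and the value. The helper
 * name si_dma_srbm_write() is hypothetical.
 */
static void si_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
	radeon_ring_write(ring, val);
}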

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int si_copy_dma(struct radeon_device *rdev,
		uint64_t src_offset, uint64_t dst_offset,
		unsigned num_gpu_pages,
		struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
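
/*
 * Illustrative usage sketch, not part of the original file: copy a range
 * of GPU pages and wait for the resulting fence. The helper name
 * si_copy_dma_example() is hypothetical; passing &fence with fence = NULL
 * is fine because radeon_semaphore_sync_to() ignores a NULL fence.
 */
static int si_copy_dma_example(struct radeon_device *rdev,
			       uint64_t src_gpu, uint64_t dst_gpu,
			       unsigned num_pages)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = si_copy_dma(rdev, src_gpu, dst_gpu, num_pages, &fence);
	if (r)
		return r;

	/* block (non-interruptibly) until the copy has completed */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}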