etnaviv_buffer.c

/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */
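/*
 * Conventions used by these helpers: 'user_size' is the byte offset of the
 * next free slot in the buffer (OUT() stores one 32-bit word there and
 * advances it by four), every command starts 8-byte aligned, and a "dword"
 * in this file means one 64-bit command slot, i.e. two 32-bit words.
 */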
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}
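
/*
 * A LOAD_STATE thus occupies one 64-bit slot: a header word carrying the
 * opcode, a count of one and the target state offset (the register address
 * shifted down by VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR), followed by the
 * value to store into that register.
 */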

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
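
/*
 * CMD_SEM and CMD_STALL are always emitted as a pair below: the semaphore
 * token travels from the 'from' unit (typically the FE) to the 'to' unit
 * (typically the PE), and the STALL then holds the FE until the token has
 * been consumed, in effect draining all work queued ahead of that point.
 */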

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
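/*
 * The FE keeps re-executing the WAIT while the ring idles, so the pair is
 * updated argument-first: while lw[0] still holds the WAIT opcode, the new
 * argument in lw[1] is ignored, and by the time the new command word lands
 * in lw[0] its argument is already valid.  The barriers keep the two stores
 * from being reordered.
 */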
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
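/*
 * Illustrative numbers: with size == 4096 and user_size == 4088, reserving
 * two 64-bit slots needs 16 bytes, which would overrun the buffer, so the
 * write pointer wraps to offset 0 and the returned GPU address points at
 * the start of the buffer.
 */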
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}
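
/*
 * The ring is parked as a tight loop: a WAIT that idles the FE for a number
 * of cycles, followed by a LINK back to that WAIT ('user_size - 4' is
 * evaluated before the LINK is emitted, so it is the byte offset of the
 * WAIT itself).  The value returned, in 64-bit units, is presumably what
 * the caller programs as the FE prefetch when starting the ring.
 */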
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer) + buffer->user_size - 4);

	return buffer->user_size / 8;
}

u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}

u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(0));

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
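
/*
 * Terminate the ring buffer: flush the caches of the active pipe if there
 * is one, then patch the parked WAIT/LINK pair so the FE runs into an END
 * and stops.
 */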
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	lockdep_assert_held(&gpu->lock);

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);
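
	/*
	 * Once the FE executes the END above it stops fetching; the sync
	 * point handler on the CPU side is then expected to service the
	 * event and restart the FE at the WAIT/LINK appended below,
	 * resuming the ring.
	 */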

	/* Append waitlink */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer) + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;

	lockdep_assert_held(&gpu->lock);

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append an MMU flush and/or pipe switch, followed by a new
	 * link to this buffer - the exact number of extra dwords is counted
	 * below.
	 */
	if (gpu->mmu->need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (gpu->mmu->need_flush) {
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;
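		/*
		 * etnaviv_cmd_select_pipe() emits a cache flush, a
		 * semaphore/stall pair and the PIPE_SELECT itself,
		 * i.e. four 64-bit command slots.
		 */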

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
					VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->mmu->need_flush = false;
		}

		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
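
		/*
		 * The waitlink patch at the bottom of this function will now
		 * branch into this prologue, which in turn LINKs into the
		 * submitted command buffer.
		 */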
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_DEPTH |
			       VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
			       VIVS_TS_FLUSH_CACHE_FLUSH);
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer) + buffer->user_size - 4);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target, etnaviv_cmdbuf_get_va(cmdbuf),
			cmdbuf->vaddr);

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	gpu->lastctx = cmdbuf->ctx;
}