a3xx_gpu.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef CONFIG_MSM_OCMEM
#  include <mach/ocmem.h>
#endif

#include "a3xx_gpu.h"

#define A3XX_INT0_MASK \
        (A3XX_INT0_RBBM_AHB_ERROR |        \
         A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
         A3XX_INT0_CP_T0_PACKET_IN_IB |    \
         A3XX_INT0_CP_OPCODE_ERROR |       \
         A3XX_INT0_CP_RESERVED_BIT_ERROR | \
         A3XX_INT0_CP_HW_FAULT |           \
         A3XX_INT0_CP_IB1_INT |            \
         A3XX_INT0_CP_IB2_INT |            \
         A3XX_INT0_CP_RB_INT |             \
         A3XX_INT0_CP_REG_PROTECT_FAULT |  \
         A3XX_INT0_CP_AHB_ERROR_HALT |     \
         A3XX_INT0_UCHE_OOB_ACCESS)

static bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

static void a3xx_dump(struct msm_gpu *gpu);

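/*
 * Ramp up the CP micro-engine: emit a CP_ME_INIT packet (17 payload dwords
 * of mostly opaque initialization state, which appear to match the
 * downstream kgsl driver), then flush the ringbuffer and wait for the GPU
 * to go idle before continuing.
 */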
static void a3xx_me_init(struct msm_gpu *gpu)
{
        struct msm_ringbuffer *ring = gpu->rb;

        OUT_PKT3(ring, CP_ME_INIT, 17);
        OUT_RING(ring, 0x000003f7);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000080);
        OUT_RING(ring, 0x00000100);
        OUT_RING(ring, 0x00000180);
        OUT_RING(ring, 0x00006600);
        OUT_RING(ring, 0x00000150);
        OUT_RING(ring, 0x0000014e);
        OUT_RING(ring, 0x00000154);
        OUT_RING(ring, 0x00000001);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        gpu->funcs->flush(gpu);
        gpu->funcs->idle(gpu);
}

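/*
 * One-time hardware setup at power-up/resume: per-SoC VBIF (bus interface)
 * tuning, error reporting and hang detection, CP register protection
 * ranges, and PM4/PFP microcode load, then release the micro-engine from
 * halt and run a3xx_me_init().
 */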
static int a3xx_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
        uint32_t *ptr, len;
        int i, ret;

        DBG("%s", gpu->name);

        if (adreno_is_a305(adreno_gpu)) {
                /* Set up 16 deep read/write request queues: */
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
                /* Enable WR-REQ: */
                gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
                /* Set up round robin arbitration between both AXI ports: */
                gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
                /* Set up AOOO: */
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
        } else if (adreno_is_a320(adreno_gpu)) {
                /* Set up 16 deep read/write request queues: */
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
                /* Enable WR-REQ: */
                gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
                /* Set up round robin arbitration between both AXI ports: */
                gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
                /* Set up AOOO: */
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
                /* Enable 1K sort: */
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
        } else if (adreno_is_a330v2(adreno_gpu)) {
                /*
                 * Most of the VBIF registers on 8974v2 have the correct
                 * values at power on, so we won't modify those if we don't
                 * need to.
                 */
                /* Enable 1k sort: */
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
                /* Enable WR-REQ: */
                gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
                gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
                /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
                gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
        } else if (adreno_is_a330(adreno_gpu)) {
                /* Set up 16 deep read/write request queues: */
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
                gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
                gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
                /* Enable WR-REQ: */
                gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
                /* Set up round robin arbitration between both AXI ports: */
                gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
                /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
                gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
                /* Set up AOOO: */
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
                gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
                /* Enable 1K sort: */
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
                /* Disable VBIF clock gating, so that AXI can run at a
                 * higher frequency than the GPU:
                 */
                gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
        } else {
                BUG();
        }

        /* Make all blocks contribute to the GPU BUSY perf counter: */
        gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

        /* Tune the hysteresis counters for SP and CP idle detection: */
        gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
        gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

        /* Enable the RBBM error reporting bits.  This lets us get
         * useful information on failure:
         */
        gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

        /* Enable AHB error reporting: */
        gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

        /* Turn on the power counters: */
        gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

        /* Turn on hang detection - this spews a lot of useful information
         * into the RBBM registers on a hang:
         */
        gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

        /* Enable 64-byte cacheline size.  HW default is 32-byte (0x000000E0): */
        gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

        /* Enable clock gating: */
        if (adreno_is_a320(adreno_gpu))
                gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
        else if (adreno_is_a330v2(adreno_gpu))
                gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
        else if (adreno_is_a330(adreno_gpu))
                gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);

        if (adreno_is_a330v2(adreno_gpu))
                gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
        else if (adreno_is_a330(adreno_gpu))
                gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);

        /* Set the OCMEM base address for A330, etc: */
        if (a3xx_gpu->ocmem_hdl) {
                gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
                        (unsigned int)(a3xx_gpu->ocmem_base >> 14));
        }

        /* Turn on performance counters: */
        gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);

        /* Enable the perfcntrs that we use.. */
        for (i = 0; i < gpu->num_perfcntrs; i++) {
                const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
                gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
        }

        gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);

        ret = adreno_hw_init(gpu);
        if (ret)
                return ret;

        /* setup access protection: */
        gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

        /* RBBM registers */
        gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);

        /* CP registers */
        gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
        gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);

        /* RB registers */
        gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);

        /* VBIF registers */
        gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);

        /* NOTE: PM4/micro-engine firmware registers look to be the same
         * for a2xx and a3xx.. we could possibly push that part down to
         * the adreno_gpu base class.  Or push both PM4 and PFP but
         * parameterize the pfp ucode addr/data registers..
         */

        /* Load PM4: */
        ptr = (uint32_t *)(adreno_gpu->pm4->data);
        len = adreno_gpu->pm4->size / 4;
        DBG("loading PM4 ucode version: %x", ptr[1]);

        gpu_write(gpu, REG_AXXX_CP_DEBUG,
                        AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
                        AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
        gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
        for (i = 1; i < len; i++)
                gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

        /* Load PFP: */
        ptr = (uint32_t *)(adreno_gpu->pfp->data);
        len = adreno_gpu->pfp->size / 4;
        DBG("loading PFP ucode version: %x", ptr[5]);

        gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
        for (i = 1; i < len; i++)
                gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);

        /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
        if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
                gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
        } else if (adreno_is_a330(adreno_gpu)) {
                /* NOTE: this value (taken from the downstream Android
                 * driver) includes some bits outside of the known
                 * bitfields.  But A330 has this "MERCIU queue" thing too,
                 * which might explain a new bitfield or reshuffling:
                 */
                gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
        }

        /* clear ME_HALT to start the micro engine */
        gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

        a3xx_me_init(gpu);

        return 0;
}

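/*
 * Recover from a hang: optionally dump register state first, then pulse
 * RBBM_SW_RESET_CMD (the intervening read presumably ensures the write
 * has posted before the reset is released) and hand off to the common
 * adreno recovery path.
 */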
static void a3xx_recover(struct msm_gpu *gpu)
{
        /* dump registers before resetting gpu, if enabled: */
        if (hang_debug)
                a3xx_dump(gpu);

        gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
        gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
        gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
        adreno_recover(gpu);
}

static void a3xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

        DBG("%s", gpu->name);

        adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
        if (a3xx_gpu->ocmem_base)
                ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif

        kfree(a3xx_gpu);
}

static void a3xx_idle(struct msm_gpu *gpu)
{
        /* wait for ringbuffer to drain: */
        adreno_idle(gpu);

        /* then wait for GPU to finish: */
        if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
                        A3XX_RBBM_STATUS_GPU_BUSY)))
                DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

        /* TODO maybe we need to reset GPU here to recover from hang? */
}

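/*
 * IRQ handler: read and ack the pending RBBM interrupt bits, then retire
 * completed submits.  The individual error interrupts enabled in
 * A3XX_INT0_MASK are not decoded yet (hence the TODO below).
 */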
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
        uint32_t status;

        status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
        DBG("%s: %08x", gpu->name, status);

        // TODO

        gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);

        msm_gpu_retire(gpu);

        return IRQ_HANDLED;
}

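/*
 * Pairs of (start, end) register offsets, both ends inclusive, walked by
 * a3xx_show()/a3xx_dump() below when dumping GPU state:
 */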
static const unsigned int a3xx_registers[] = {
        0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
        0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
        0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
        0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
        0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
        0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
        0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
        0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
        0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
        0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
        0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
        0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
        0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
        0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
        0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
        0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
        0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
        0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
        0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
        0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
        0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
        0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
        0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
        0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
        0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
        0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
        0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
        0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
        0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
        0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
        0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
        0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
        0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
        0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
        0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
        0x303c, 0x303c, 0x305e, 0x305f,
};

#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
        struct drm_device *dev = gpu->dev;
        int i;

        adreno_show(gpu, m);

        mutex_lock(&dev->struct_mutex);

        gpu->funcs->pm_resume(gpu);

        seq_printf(m, "status: %08x\n",
                        gpu_read(gpu, REG_A3XX_RBBM_STATUS));

        /* dump these out in a form that can be parsed by demsm: */
        seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
        for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
                uint32_t start = a3xx_registers[i];
                uint32_t end = a3xx_registers[i+1];
                uint32_t addr;

                for (addr = start; addr <= end; addr++) {
                        uint32_t val = gpu_read(gpu, addr);
                        seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
                }
        }

        gpu->funcs->pm_suspend(gpu);

        mutex_unlock(&dev->struct_mutex);
}
#endif

/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
        int i;

        adreno_dump(gpu);

        printk("status: %08x\n",
                        gpu_read(gpu, REG_A3XX_RBBM_STATUS));

        /* dump these out in a form that can be parsed by demsm: */
        printk("IO:region %s 00000000 00020000\n", gpu->name);
        for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
                uint32_t start = a3xx_registers[i];
                uint32_t end = a3xx_registers[i+1];
                uint32_t addr;

                for (addr = start; addr <= end; addr++) {
                        uint32_t val = gpu_read(gpu, addr);
                        printk("IO:R %08x %08x\n", addr<<2, val);
                }
        }
}

static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .hw_init = a3xx_hw_init,
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
                .recover = a3xx_recover,
                .last_fence = adreno_last_fence,
                .submit = adreno_submit,
                .flush = adreno_flush,
                .idle = a3xx_idle,
                .irq = a3xx_irq,
                .destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
                .show = a3xx_show,
#endif
        },
};

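/*
 * Performance counters exposed to userspace; each entry is
 * { select register, counter (LO) register, event select value, name },
 * programmed into the hardware by the perfcntr loop in a3xx_hw_init().
 */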
static const struct msm_gpu_perfcntr perfcntrs[] = {
        { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
                        SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
        { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
                        SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};

struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
        struct a3xx_gpu *a3xx_gpu = NULL;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
        struct adreno_platform_config *config;
        int ret;

        if (!pdev) {
                dev_err(dev->dev, "no a3xx device\n");
                ret = -ENXIO;
                goto fail;
        }

        config = pdev->dev.platform_data;

        a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
        if (!a3xx_gpu) {
                ret = -ENOMEM;
                goto fail;
        }

        adreno_gpu = &a3xx_gpu->base;
        gpu = &adreno_gpu->base;

        a3xx_gpu->pdev = pdev;

        gpu->fast_rate = config->fast_rate;
        gpu->slow_rate = config->slow_rate;
        gpu->bus_freq = config->bus_freq;
#ifdef CONFIG_MSM_BUS_SCALING
        gpu->bus_scale_table = config->bus_scale_table;
#endif

        DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
                        gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

        gpu->perfcntrs = perfcntrs;
        gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);

        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
        if (ret)
                goto fail;

        /* if needed, allocate gmem: */
        if (adreno_is_a330(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
                /* TODO this is different/missing upstream: */
                struct ocmem_buf *ocmem_hdl =
                                ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);

                a3xx_gpu->ocmem_hdl = ocmem_hdl;
                a3xx_gpu->ocmem_base = ocmem_hdl->addr;
                adreno_gpu->gmem = ocmem_hdl->len;
                DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
                                a3xx_gpu->ocmem_base);
#endif
        }

        if (!gpu->mmu) {
                /* TODO we think it is possible to configure the GPU to
                 * restrict access to the VRAM carveout.  But the required
                 * registers are unknown.  For now just bail out and
                 * limp along with just modesetting.  If it turns out
                 * to not be possible to restrict access, then we must
                 * implement a cmdstream validator.
                 */
                dev_err(dev->dev, "No memory protection without IOMMU\n");
                ret = -ENXIO;
                goto fail;
        }

        return gpu;

fail:
        if (a3xx_gpu)
                a3xx_destroy(&a3xx_gpu->base.base);

        return ERR_PTR(ret);
}

/*
 * The a3xx device:
 */

#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
#  include <mach/kgsl.h>
#endif

static void set_gpu_pdev(struct drm_device *dev,
                struct platform_device *pdev)
{
        struct msm_drm_private *priv = dev->dev_private;
        priv->gpu_pdev = pdev;
}

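/*
 * Component bind: build an adreno_platform_config either from DT (the
 * qcom,chipid property plus the min/max clock rates found under the
 * qcom,gpu-pwrlevels nodes) or, on non-DT builds, from hard-coded per-SoC
 * tables, then stash it as this device's platform_data and record the
 * GPU's platform_device for a3xx_gpu_init().
 */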
static int a3xx_bind(struct device *dev, struct device *master, void *data)
{
        static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
        struct device_node *child, *node = dev->of_node;
        u32 val;
        int ret;

        ret = of_property_read_u32(node, "qcom,chipid", &val);
        if (ret) {
                dev_err(dev, "could not find chipid: %d\n", ret);
                return ret;
        }

        config.rev = ADRENO_REV((val >> 24) & 0xff,
                        (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);

        /* find clock rates: */
        config.fast_rate = 0;
        config.slow_rate = ~0;
        for_each_child_of_node(node, child) {
                if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
                        struct device_node *pwrlvl;
                        for_each_child_of_node(child, pwrlvl) {
                                ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
                                if (ret) {
                                        dev_err(dev, "could not find gpu-freq: %d\n", ret);
                                        return ret;
                                }
                                config.fast_rate = max(config.fast_rate, val);
                                config.slow_rate = min(config.slow_rate, val);
                        }
                }
        }

        if (!config.fast_rate) {
                dev_err(dev, "could not find clk rates\n");
                return -ENXIO;
        }

#else
        struct kgsl_device_platform_data *pdata = dev->platform_data;
        uint32_t version = socinfo_get_version();

        if (cpu_is_apq8064ab()) {
                config.fast_rate = 450000000;
                config.slow_rate = 27000000;
                config.bus_freq = 4;
                config.rev = ADRENO_REV(3, 2, 1, 0);
        } else if (cpu_is_apq8064()) {
                config.fast_rate = 400000000;
                config.slow_rate = 27000000;
                config.bus_freq = 4;

                if (SOCINFO_VERSION_MAJOR(version) == 2)
                        config.rev = ADRENO_REV(3, 2, 0, 2);
                else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
                                (SOCINFO_VERSION_MINOR(version) == 1))
                        config.rev = ADRENO_REV(3, 2, 0, 1);
                else
                        config.rev = ADRENO_REV(3, 2, 0, 0);
        } else if (cpu_is_msm8960ab()) {
                config.fast_rate = 400000000;
                config.slow_rate = 320000000;
                config.bus_freq = 4;

                if (SOCINFO_VERSION_MINOR(version) == 0)
                        config.rev = ADRENO_REV(3, 2, 1, 0);
                else
                        config.rev = ADRENO_REV(3, 2, 1, 1);
        } else if (cpu_is_msm8930()) {
                config.fast_rate = 400000000;
                config.slow_rate = 27000000;
                config.bus_freq = 3;

                if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
                                (SOCINFO_VERSION_MINOR(version) == 2))
                        config.rev = ADRENO_REV(3, 0, 5, 2);
                else
                        config.rev = ADRENO_REV(3, 0, 5, 0);
        }
#  ifdef CONFIG_MSM_BUS_SCALING
        config.bus_scale_table = pdata->bus_scale_table;
#  endif
#endif
        dev->platform_data = &config;
        set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
        return 0;
}

static void a3xx_unbind(struct device *dev, struct device *master,
                void *data)
{
        set_gpu_pdev(dev_get_drvdata(master), NULL);
}

static const struct component_ops a3xx_ops = {
                .bind   = a3xx_bind,
                .unbind = a3xx_unbind,
};

static int a3xx_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &a3xx_ops);
}

static int a3xx_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &a3xx_ops);
        return 0;
}

static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,kgsl-3d0" },
        {}
};

static struct platform_driver a3xx_driver = {
        .probe = a3xx_probe,
        .remove = a3xx_remove,
        .driver = {
                .name = "kgsl-3d0",
                .of_match_table = dt_match,
        },
};

void __init a3xx_register(void)
{
        platform_driver_register(&a3xx_driver);
}

void __exit a3xx_unregister(void)
{
        platform_driver_unregister(&a3xx_driver);
}