etnaviv_iommu_v2.c

/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024
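
/*
 * MMUv2 uses a two-level page table: a single 4K master TLB (MTLB) page
 * holds 1024 entries, each pointing to a 4K slave TLB (STLB) page of 1024
 * page table entries.  With 4K pages this covers a 4 GiB GPU address space:
 * bits [31:22] of an address index the MTLB, bits [21:12] index the STLB.
 */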
struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[1024];
	dma_addr_t stlb_dma[1024];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
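
/*
 * Map a single 4K page: split the GPU virtual address into its MTLB and
 * STLB indices and write the physical address, tagged as present (and
 * writeable if requested), into the corresponding STLB entry.
 */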
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;
	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}
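
/*
 * Unmap a single 4K page by marking its STLB entry as an exception, so a
 * stale access faults instead of hitting the old physical page.
 */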
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}
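
/*
 * Allocate the DMA-coherent backing for the domain: a poisoned scratch
 * page used as the MMU's safe address, the page table array (PTA) used
 * in secure mode, the MTLB and all 1024 STLB pages.  Every STLB entry
 * starts out as an exception and the MTLB is pointed at the STLBs up
 * front.
 */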
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i, j;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
						etnaviv_domain->base.dev,
						SZ_4K,
						&etnaviv_domain->base.bad_page_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}
	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
						     SZ_4K,
						     &etnaviv_domain->pta_dma,
						     GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
						      SZ_4K,
						      &etnaviv_domain->mtlb_dma,
						      GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* pre-populate STLB pages (may want to switch to on-demand later) */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		etnaviv_domain->stlb_cpu[i] =
				dma_alloc_coherent(etnaviv_domain->base.dev,
						   SZ_4K,
						   &etnaviv_domain->stlb_dma[i],
						   GFP_KERNEL);
		if (!etnaviv_domain->stlb_cpu[i]) {
			ret = -ENOMEM;
			goto fail_mem;
		}
		p = etnaviv_domain->stlb_cpu[i];
		for (j = 0; j < SZ_4K / 4; j++)
			*p++ = MMUv2_PTE_EXCEPTION;

		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
					      MMUv2_PTE_PRESENT;
	}

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->base.bad_page_cpu,
				  etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->pta_cpu,
				  etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->mtlb_cpu,
				  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	return ret;
}
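
/*
 * Tear down the domain: release the scratch page, the PTA, the MTLB and
 * every allocated STLB page, then free the domain structure itself.
 */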
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->base.bad_page_cpu,
			  etnaviv_domain->base.bad_page_dma);

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->pta_cpu,
			  etnaviv_domain->pta_dma);

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->mtlb_cpu,
			  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}
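
/*
 * A page table dump consists of the MTLB page followed by one page for
 * every STLB that is marked present in the MTLB.
 */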
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
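
/*
 * Non-secure restore: the MMUv2 page table base and the safe (fault
 * target) address are programmed through a command buffer executed by
 * the front end (FE); once the FE has gone idle the MMU is switched on
 * via VIVS_MMUv2_CONTROL.
 */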
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
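
/*
 * Secure restore: the MMU is set up through the security register
 * interface instead.  The page table array (PTA) entry 0 is pointed at
 * the MTLB in 4K mode, safe addresses are programmed for both secure and
 * non-secure accesses, and a PTA load is triggered through the FE before
 * the MMU is enabled via VIVS_MMUv2_SEC_CONTROL.
 */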
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}
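
/* Restore the MMU state using the path matching the GPU's security mode. */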
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};
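
/*
 * Allocate an MMUv2 domain covering the full 4 GiB GPU address space and
 * set up its page tables; returns NULL on any allocation failure.
 */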
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}