etnaviv_mmu.c 8.9 KB

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
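
/*
 * Tear down the page mappings covering [iova, iova + size) in 4K steps,
 * bailing out early if the domain backend fails to unmap a page.
 */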
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}
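
/*
 * Map a physically contiguous range into the GPU domain in 4K pages.
 * If any page fails to map, the pages mapped so far are unrolled again.
 */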
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
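
/*
 * Map each entry of a scatterlist at consecutive device addresses starting
 * at iova; on error, everything that was already mapped is unmapped again.
 */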
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}
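
/* Unmap a previously mapped scatterlist starting at iova. */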
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}
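
/*
 * Unmap a GEM object's pages and give its address range back to the
 * drm_mm address space manager.
 */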
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
                                         struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}
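
/*
 * Find a free range of GPU virtual address space for the given node. If the
 * address space is full, run a drm_mm eviction scan to reap unpinned
 * mappings and retry the allocation. Called with mmu->lock held.
 */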
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = 0;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}
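
/*
 * Map a GEM object into the GPU address space and add the mapping to the
 * MMU's mapping list. On MMUv1, a single-entry (contiguous) scatterlist
 * that fits below 2 GiB relative to memory_base is used directly, without
 * setting up per-page mappings.
 */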
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->need_flush = true;
unlock:
        mutex_unlock(&mmu->lock);

        return ret;
}
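
/*
 * Drop a GEM object's mapping from the GPU address space and from the
 * MMU's mapping list. The mapping must no longer be in use.
 */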
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
                             struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->need_flush = true;
        mutex_unlock(&mmu->lock);
}
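
/* Tear down the address space manager and free the IOMMU domain. */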
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);

        mmu->domain->ops->free(mmu->domain);

        kfree(mmu);
}
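
/*
 * Allocate the per-GPU MMU context: pick the v1 or v2 IOMMU implementation
 * based on the GPU's feature bits and initialize the drm_mm address space
 * manager over the domain's aperture.
 */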
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

        return mmu;
}
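
/* Program the hardware MMU state for the version in use. */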
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}
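
/*
 * Get a GPU virtual address for a suballocated buffer: on MMUv1 this is
 * simply the offset from memory_base, on MMUv2 a read-only mapping is set
 * up in the GPU address space.
 */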
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
                                         size, ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}
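
/*
 * Release a suballocation mapping again; only MMUv2 has an actual page
 * mapping and drm_mm node to tear down.
 */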
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                etnaviv_domain_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}
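
/* Report the size of and dump the MMU state for GPU core dumps. */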
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        iommu->domain->ops->dump(iommu->domain, buf);
}