/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
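
/* Per-domain IOMMU fault handler: just logs the faulting IOVA and flags. */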
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
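
/*
 * Map a scatter/gather table into the GPU address space, placing the entries
 * back to back starting at @iova.  If any entry fails to map, everything
 * mapped so far is unmapped again before the error is returned.
 */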
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
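
/*
 * Reverse of etnaviv_iommu_map(): unmap each scatterlist entry from the
 * consecutive GPU addresses starting at @iova.
 */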
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}
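
/*
 * Drop a GEM object's mapping from the GPU address space and return its
 * drm_mm node to the address space manager.
 */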
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
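
/*
 * Map a GEM object into the GPU address space.  A buffer that is contiguous
 * in DMA space can be addressed directly (offset by @memory_base) without an
 * MMU entry, provided it fits below 2 GiB.  Otherwise a drm_mm node is
 * allocated for it; if the address space is full, unused and unpinned
 * mappings are evicted via the drm_mm scan API to make room, and the MMU is
 * marked for flushing before the next commit.
 */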
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_vram_mapping *free = NULL;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;
	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed. Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}
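
/*
 * Remove a GEM object's mapping: tear down the MMU mapping if one was set up
 * (the direct, contiguous case owns no vram node) and take the mapping off
 * the MMU's list.
 */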
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}
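
/* Tear down the address space manager, free the IOMMU domain and the context. */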
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);

	iommu_domain_free(mmu->domain);
	kfree(mmu);
}
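
/*
 * Allocate and initialise an etnaviv_iommu context for @domain: set up the
 * drm_mm address space manager over the domain's aperture and install the
 * fault handler.
 */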
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain, enum etnaviv_iommu_version version)
{
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->domain = domain;
	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
		    domain->geometry.aperture_end -
		    domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}
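
/*
 * Size in bytes of an MMU state dump, as reported by the etnaviv IOMMU
 * implementation behind the domain.
 */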
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}
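
/* Dump the MMU state into @buf, sized via etnaviv_iommu_dump_size(). */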
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}