etnaviv_mmu.c

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

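/* IOMMU fault handler: just log the faulting address and access flags. */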
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

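/*
 * Map each entry of the scatterlist into the GPU address space starting at
 * @iova. On failure, unwind the entries that were already mapped.
 */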
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

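/* Tear down the page mappings previously created by etnaviv_iommu_map(). */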
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

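/*
 * Unmap a GEM object's pages and give its range back to the address space
 * manager.
 */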
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

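/*
 * Find an IOVA range of @size for @node: first try above last_iova, then
 * retry from the start of the address space, and finally evict unpinned
 * mappings using the drm_mm scan API to make room.
 */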
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0,
						  mmu->last_iova, U64_MAX,
						  mode);
		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

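/*
 * Map a GEM object into the GPU address space. A v1 MMU can map a
 * contiguous (single entry) scatterlist directly; otherwise an IOVA range
 * is allocated and the scatterlist is mapped into it.
 */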
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);

	return ret;
}

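/* Drop a GEM object's mapping from the GPU address space. */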
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);

	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

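/*
 * Allocate and initialise the MMU context for @gpu, selecting the v1 or v2
 * page table format based on the core's feature bits.
 */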
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
		    mmu->domain->geometry.aperture_end -
		    mmu->domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

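/*
 * Get a GPU address for a suballocated region: v1 MMUs return the physical
 * address relative to memory_base, v2 MMUs map the region read-only into a
 * newly allocated IOVA range.
 */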
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
				IOMMU_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		mmu->last_iova = vram_node->start + size;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		iommu_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}

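/*
 * MMU dump helpers: report the size of, and dump, the MMU state via the
 * per-version etnaviv_iommu_ops.
 */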
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}