@@ -14,7 +14,6 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */

-#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -40,10 +39,7 @@
 #define MMUv2_MAX_STLB_ENTRIES		1024

 struct etnaviv_iommuv2_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
+	struct etnaviv_iommu_domain base;
 	/* M(aster) TLB aka first level pagetable */
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
 	dma_addr_t stlb_dma[1024];
 };

-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 {
-	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+	return container_of(domain, struct etnaviv_iommuv2_domain, base);
 }

-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
-	   phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
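This hunk is the heart of the conversion: the domain no longer embeds the core's struct iommu_domain but a driver-private struct etnaviv_iommu_domain named base, and to_etnaviv_domain() recovers the enclosing struct through that member. As a minimal, self-contained sketch of the embedded-base idiom being used here (hypothetical names, not the driver's own):

    #include <stddef.h>

    /* kernel-style container_of(), reduced to plain C */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_domain {                  /* stands in for etnaviv_iommu_domain */
            void *dev;
    };

    struct v2_domain {
            struct base_domain base;      /* embedded base, as in the patch */
            unsigned int extra;
    };

    static struct v2_domain *to_v2(struct base_domain *d)
    {
            /* walk back from the embedded member to the enclosing struct */
            return container_of(d, struct v2_domain, base);
    }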
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	if (size != SZ_4K)
 		return -EINVAL;

-	if (prot & IOMMU_WRITE)
+	if (prot & ETNAVIV_PROT_WRITE)
 		entry |= MMUv2_PTE_WRITEABLE;

 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
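With <linux/iommu.h> gone, IOMMU_WRITE is no longer visible here, so the map path tests a driver-private flag instead. A sketch of what the corresponding definitions typically look like; the exact bit values are an assumption, the authoritative ones live in the driver's MMU header:

    /* driver-private mapping attributes replacing the core
     * IOMMU_READ/IOMMU_WRITE flags (bit values assumed) */
    #define ETNAVIV_PROT_READ	(1 << 0)
    #define ETNAVIV_PROT_WRITE	(1 << 1)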
@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	return 0;
 }

-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
-	unsigned long iova, size_t size)
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -103,19 +101,20 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	int ret, i, j;

 	/* allocate scratch page */
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu) {
+	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+					etnaviv_domain->base.dev,
+					SZ_4K,
+					&etnaviv_domain->base.bad_page_dma,
+					GFP_KERNEL);
+	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
-	p = etnaviv_domain->bad_page_cpu;
+	p = etnaviv_domain->base.bad_page_cpu;
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;

-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
 						  SZ_4K,
 						  &etnaviv_domain->mtlb_dma,
 						  GFP_KERNEL);
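The scratch ("bad") page gives every unmapped GPU address a recognizable landing spot: it is filled with the 0xdead55aa marker so stray reads and writes stand out in a dump. A minimal sketch of how a pre-populated second-level table would be aimed at it, assuming a fault-on-access bit along the lines of the driver's MMUv2_PTE_* definitions:

    #include <stdint.h>

    #define PTE_EXCEPTION	(1u << 1)	/* assumed fault-on-access bit */

    /* aim all 1024 entries of one second-level table at the scratch page */
    static void stlb_point_at_bad_page(uint32_t *stlb, uint32_t bad_page_dma)
    {
            unsigned int j;

            for (j = 0; j < 1024; j++)
                    stlb[j] = bad_page_dma | PTE_EXCEPTION;
    }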
@@ -127,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->dev,
+				dma_alloc_coherent(etnaviv_domain->base.dev,
 						   SZ_4K,
 						   &etnaviv_domain->stlb_dma[i],
 						   GFP_KERNEL);
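Pre-populating all of the second-level tables fixes the domain geometry: 1024 MTLB entries x 1024 STLB entries x 4 KiB pages = 4 GiB, which matches the domain->size = (u64)SZ_1G * 4 set in the alloc path below. A small sketch of the index split, with shift/mask values assumed to mirror the MMUv2_MTLB_*/MMUv2_STLB_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed split: iova[31:22] = MTLB index, iova[21:12] = STLB index */
    #define MTLB_SHIFT	22
    #define STLB_SHIFT	12
    #define IDX_MASK	0x3ffu		/* 1024 entries per table */

    int main(void)
    {
            uint32_t iova = 0x12345000u;

            printf("mtlb %u, stlb %u, page offset %u\n",
                   (iova >> MTLB_SHIFT) & IDX_MASK,
                   (iova >> STLB_SHIFT) & IDX_MASK,
                   iova & 0xfffu);
            return 0;
    }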
@@ -146,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	return 0;

 fail_mem:
-	if (etnaviv_domain->bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	if (etnaviv_domain->base.bad_page_cpu)
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+				  etnaviv_domain->base.bad_page_cpu,
+				  etnaviv_domain->base.bad_page_dma);

 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 				  etnaviv_domain->mtlb_cpu,
 				  etnaviv_domain->mtlb_dma);

 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -166,23 +165,23 @@ fail_mem:
 	return ret;
 }

-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 	int i;

-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+			  etnaviv_domain->base.bad_page_cpu,
+			  etnaviv_domain->base.bad_page_dma);

-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 			  etnaviv_domain->mtlb_cpu,
 			  etnaviv_domain->mtlb_dma);

 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -190,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
 	vfree(etnaviv_domain);
 }

-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -204,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
 	return dump_size;
 }

-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -217,17 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 		memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 }

-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_iommuv2_domain_free,
-		.map = etnaviv_iommuv2_map,
-		.unmap = etnaviv_iommuv2_unmap,
-		.pgsize_bitmap = SZ_4K,
-	},
-	.dump_size = etnaviv_iommuv2_dump_size,
-	.dump = etnaviv_iommuv2_dump,
-};
-
 void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -240,35 +228,45 @@

 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)etnaviv_domain->mtlb_dma,
-				(u32)etnaviv_domain->bad_page_dma);
+				(u32)etnaviv_domain->base.bad_page_dma);
 	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
 			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);

 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+	.free = etnaviv_iommuv2_domain_free,
+	.map = etnaviv_iommuv2_map,
+	.unmap = etnaviv_iommuv2_unmap,
+	.dump_size = etnaviv_iommuv2_dump_size,
+	.dump = etnaviv_iommuv2_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain;
+	struct etnaviv_iommu_domain *domain;
 	int ret;

 	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
 	if (!etnaviv_domain)
 		return NULL;

-	etnaviv_domain->dev = gpu->dev;
+	domain = &etnaviv_domain->base;

-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = 0;
-	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+	domain->dev = gpu->dev;
+	domain->base = 0;
+	domain->size = (u64)SZ_1G * 4;
+	domain->ops = &etnaviv_iommuv2_ops;

 	ret = etnaviv_iommuv2_init(etnaviv_domain);
 	if (ret)
 		goto out_free;

-	return &etnaviv_domain->domain;
+	return &etnaviv_domain->base;

 out_free:
 	vfree(etnaviv_domain);
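With the struct iommu_ops table gone, map/unmap/dump dispatch goes through the driver's own etnaviv_iommu_domain_ops vtable, and the geometry that iommu_domain.geometry used to carry is now expressed by the base/size fields set at alloc time. A hedged sketch of how a caller in the MMU layer might use them (hypothetical helper, not code from this patch):

    /* hypothetical caller: range-check against the domain geometry set in
     * etnaviv_iommuv2_domain_alloc(), then dispatch through the vtable */
    static int etnaviv_domain_map_page(struct etnaviv_iommu_domain *domain,
                                       unsigned long iova, phys_addr_t paddr,
                                       int prot)
    {
            if (iova < domain->base || iova - domain->base >= domain->size)
                    return -EINVAL;

            return domain->ops->map(domain, iova, paddr, SZ_4K, prot);
    }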