@@ -52,7 +52,7 @@
 
 struct gk20a_instobj {
 	struct nvkm_memory memory;
-	struct nvkm_mem mem;
+	struct nvkm_mm_node *mn;
 	struct gk20a_instmem *imem;
 
 	/* CPU mapping */
@@ -129,13 +129,13 @@ gk20a_instobj_page(struct nvkm_memory *memory)
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
-	return gk20a_instobj(memory)->mem.offset;
+	return (u64)gk20a_instobj(memory)->mn->offset << 12;
 }
 
 static u64
 gk20a_instobj_size(struct nvkm_memory *memory)
 {
-	return (u64)gk20a_instobj(memory)->mem.size << 12;
+	return (u64)gk20a_instobj(memory)->mn->length << 12;
 }
 
 /*
@@ -284,8 +284,22 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 		  struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	nvkm_vm_map_at(vma, 0, &node->mem);
-	return 0;
+	struct nvkm_vmm_map map = {
+		.memory = &node->memory,
+		.offset = offset,
+		.mem = node->mn,
+	};
+
+	if (vma->vm) {
+		struct nvkm_mem mem = {
+			.mem = node->mn,
+			.memory = &node->memory,
+		};
+		nvkm_vm_map_at(vma, 0, &mem);
+		return 0;
+	}
+
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 static void *
@@ -298,8 +312,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 	if (unlikely(!node->base.vaddr))
 		goto out;
 
-	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-		       node->handle, imem->attrs);
+	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+		       node->base.vaddr, node->handle, imem->attrs);
 
 out:
 	return node;
@@ -311,7 +325,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r = node->base.mem.mem;
+	struct nvkm_mm_node *r = node->base.mn;
 	int i;
 
 	if (unlikely(!r))
@@ -329,7 +343,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < node->base.mem.size; i++) {
+	for (i = 0; i < node->base.mn->length; i++) {
 		iommu_unmap(imem->domain,
 			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -410,8 +424,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base.mem.offset = node->handle;
-	node->base.mem.mem = &node->r;
+	node->base.mn = &node->r;
 	return 0;
 }
 
@@ -488,8 +501,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
-	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-	node->base.mem.mem = r;
+	node->base.mn = r;
 	return 0;
 
 release_area:
@@ -537,13 +549,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 
 	node->imem = imem;
 
-	/* present memory for being mapped using small pages */
-	node->mem.size = size >> 12;
-	node->mem.memtype = 0;
-	node->mem.memory = &node->memory;
-
 	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		   size, align, node->mem.offset);
+		   size, align, (u64)node->mn->offset << 12);
 
 	return 0;
 }
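
A note on the `<< 12` conversions above, with a minimal standalone sketch: the allocator node stores `offset` and `length` in 4KiB-page units, so byte values are recovered by shifting left by 12, and the cast to a 64-bit type must happen *before* the shift. The `struct mm_node_sketch` below is a hypothetical, trimmed-down stand-in for `struct nvkm_mm_node` (which has more fields), kept only to make the arithmetic compile on its own; it is not nouveau code.

```c
/* Minimal sketch, not nouveau code: mm_node_sketch is a hypothetical
 * stand-in for struct nvkm_mm_node, keeping only the two fields the
 * patch touches. It shows why gk20a_instobj_addr()/_size() cast to
 * u64 before shifting: a 32-bit page number shifted by 12 could
 * otherwise be truncated. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mm_node_sketch {
	uint32_t offset;	/* GPU address, in 4KiB-page units */
	uint32_t length;	/* allocation size, in 4KiB-page units */
};

int main(void)
{
	/* A page offset at or above 2^20 makes the truncation visible. */
	struct mm_node_sketch mn = { .offset = 0x00200000, .length = 0x10 };

	uint64_t addr_ok  = (uint64_t)mn.offset << 12;	/* as in the patch */
	uint32_t addr_bad = mn.offset << 12;		/* wraps in 32 bits */

	printf("addr = 0x%" PRIx64 "\n", addr_ok);	/* 0x200000000 */
	printf("bad  = 0x%" PRIx32 "\n", addr_bad);	/* 0x0: truncated */
	printf("size = 0x%" PRIx64 "\n",
	       (uint64_t)mn.length << 12);		/* 0x10000 */
	return 0;
}
```

The same unit convention explains the constructors: `gk20a_instobj_ctor_dma()` fills `node->r.offset`/`node->r.length` by shifting byte quantities right by 12 before publishing the node through `node->base.mn`.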