@@ -23,35 +23,42 @@
 /*
  * GK20A does not have dedicated video memory, and to accurately represent this
  * fact Nouveau will not create a RAM device for it. Therefore its instmem
- * implementation must be done directly on top of system memory, while providing
- * coherent read and write operations.
+ * implementation must be done directly on top of system memory, while
+ * preserving coherency for read and write operations.
  *
  * Instmem can be allocated through two means:
- * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory
+ * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
  *    pages contiguous to the GPU. This is the preferred way.
- * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
+ * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
  *    contiguous memory.
  *
- * In both cases CPU read and writes are performed using PRAMIN (i.e. using the
- * GPU path) to ensure these operations are coherent for the GPU. This allows us
- * to use more "relaxed" allocation parameters when using the DMA API, since we
- * never need a kernel mapping.
+ * In both cases CPU reads and writes are performed by creating a write-combined
+ * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
+ * be conservative we do this every time we acquire or release an instobj, but
+ * ideally L2 management should be handled at a higher level.
+ *
+ * To improve performance, CPU mappings are not removed upon instobj release.
+ * Instead they are placed into an LRU list to be recycled when the mapped space
+ * goes beyond a certain threshold. At the moment this limit is 1MB.
  */
-#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 #include "priv.h"
 
 #include <core/memory.h>
 #include <core/mm.h>
 #include <core/tegra.h>
 #include <subdev/fb.h>
-
-#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+#include <subdev/ltc.h>
 
 struct gk20a_instobj {
 	struct nvkm_memory memory;
-	struct gk20a_instmem *imem;
 	struct nvkm_mem mem;
+	struct gk20a_instmem *imem;
+
+	/* CPU mapping */
+	u32 *vaddr;
+	struct list_head vaddr_node;
 };
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
 
 /*
  * Used for objects allocated using the DMA API
@@ -59,10 +66,12 @@ struct gk20a_instobj {
 struct gk20a_instobj_dma {
 	struct gk20a_instobj base;
 
-	void *cpuaddr;
+	u32 *cpuaddr;
 	dma_addr_t handle;
 	struct nvkm_mm_node r;
 };
+#define gk20a_instobj_dma(p) \
+	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
 
 /*
  * Used for objects flattened using the IOMMU API
@@ -70,15 +79,24 @@ struct gk20a_instobj_dma {
 struct gk20a_instobj_iommu {
 	struct gk20a_instobj base;
 
-	/* array of base.mem->size pages */
+	/* will point to the higher half of pages */
+	dma_addr_t *dma_addrs;
+	/* array of base.mem->size pages (+ dma_addr_ts) */
 	struct page *pages[];
 };
+#define gk20a_instobj_iommu(p) \
+	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
 
 struct gk20a_instmem {
 	struct nvkm_instmem base;
-	unsigned long lock_flags;
+
+	/* protects vaddr_* and gk20a_instobj::vaddr* */
 	spinlock_t lock;
-	u64 addr;
+
+	/* CPU mappings LRU */
+	unsigned int vaddr_use;
+	unsigned int vaddr_max;
+	struct list_head vaddr_lru;
 
 	/* Only used if IOMMU if present */
 	struct mutex *mm_mutex;
@@ -88,7 +106,10 @@ struct gk20a_instmem {
 
 	/* Only used by DMA API */
 	struct dma_attrs attrs;
+
+	void __iomem * (*cpu_map)(struct nvkm_memory *);
 };
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 
 static enum nvkm_memory_target
 gk20a_instobj_target(struct nvkm_memory *memory)
@@ -100,7 +121,6 @@ static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
 	return gk20a_instobj(memory)->mem.offset;
-
 }
 
 static u64
@@ -109,108 +129,218 @@ gk20a_instobj_size(struct nvkm_memory *memory)
 	return (u64)gk20a_instobj(memory)->mem.size << 12;
 }
 
+static void __iomem *
+gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
+{
+	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
+	struct device *dev = node->base.imem->base.subdev.device->dev;
+	int npages = nvkm_memory_size(memory) >> 12;
+	struct page *pages[npages];
+	int i;
+
+	/* phys_to_page does not exist on all platforms... */
+	pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
+	for (i = 1; i < npages; i++)
+		pages[i] = pages[0] + i;
+
+	return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
+static void __iomem *
+gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
+{
+	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+	int npages = nvkm_memory_size(memory) >> 12;
+
+	return vmap(node->pages, npages, VM_MAP,
+		    pgprot_writecombine(PAGE_KERNEL));
+}
+
+/*
+ * Must be called while holding gk20a_instmem_lock
+ */
+static void
+gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
+{
+	while (imem->vaddr_use + size > imem->vaddr_max) {
+		struct gk20a_instobj *obj;
+
+		/* no candidate that can be unmapped, abort... */
+		if (list_empty(&imem->vaddr_lru))
+			break;
+
+		obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
+				       vaddr_node);
+		list_del(&obj->vaddr_node);
+		vunmap(obj->vaddr);
+		obj->vaddr = NULL;
+		imem->vaddr_use -= nvkm_memory_size(&obj->memory);
+		nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
+			   imem->vaddr_use, imem->vaddr_max);
+
+	}
+}
+
 static void __iomem *
 gk20a_instobj_acquire(struct nvkm_memory *memory)
 {
-	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+	const u64 size = nvkm_memory_size(memory);
 	unsigned long flags;
+
+	nvkm_ltc_flush(ltc);
+
 	spin_lock_irqsave(&imem->lock, flags);
-	imem->lock_flags = flags;
-	return NULL;
+
+	if (node->vaddr) {
+		/* remove us from the LRU list since we cannot be unmapped */
+		list_del(&node->vaddr_node);
+
+		goto out;
+	}
+
+	/* try to free some address space if we reached the limit */
+	gk20a_instmem_vaddr_gc(imem, size);
+
+	node->vaddr = imem->cpu_map(memory);
+
+	if (!node->vaddr) {
+		nvkm_error(&imem->base.subdev, "cannot map instobj - "
+			   "this is not going to end well...\n");
+		goto out;
+	}
+
+	imem->vaddr_use += size;
+	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
+		   imem->vaddr_use, imem->vaddr_max);
+
+out:
+	spin_unlock_irqrestore(&imem->lock, flags);
+
+	return node->vaddr;
 }
 
 static void
 gk20a_instobj_release(struct nvkm_memory *memory)
 {
-	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
-	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
-}
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+	unsigned long flags;
 
-/*
- * Use PRAMIN to read/write data and avoid coherency issues.
- * PRAMIN uses the GPU path and ensures data will always be coherent.
- *
- * A dynamic mapping based solution would be desirable in the future, but
- * the issue remains of how to maintain coherency efficiently. On ARM it is
- * not easy (if possible at all?) to create uncached temporary mappings.
- */
+	spin_lock_irqsave(&imem->lock, flags);
+
+	/* add ourselves to the LRU list so our CPU mapping can be freed */
+	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
+
+	spin_unlock_irqrestore(&imem->lock, flags);
+
+	wmb();
+	nvkm_ltc_invalidate(ltc);
+}
 
 static u32
 gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	struct gk20a_instmem *imem = node->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
-	u32 data;
-
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
-	}
-	data = nvkm_rd32(device, 0x700000 + addr);
-	return data;
+
+	return node->vaddr[offset / 4];
 }
 
 static void
 gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	struct gk20a_instmem *imem = node->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
-
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
-	}
-	nvkm_wr32(device, 0x700000 + addr, data);
+
+	node->vaddr[offset / 4] = data;
 }
 
 static void
 gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
+
 	nvkm_vm_map_at(vma, offset, &node->mem);
 }
 
+/*
+ * Clear the CPU mapping of an instobj if it exists
+ */
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
+gk20a_instobj_dtor(struct gk20a_instobj *node)
+{
+	struct gk20a_instmem *imem = node->imem;
+	struct gk20a_instobj *obj;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imem->lock, flags);
+
+	if (!node->vaddr)
+		goto out;
+
+	list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
+		if (obj == node) {
+			list_del(&obj->vaddr_node);
+			break;
+		}
+	}
+	vunmap(node->vaddr);
+	node->vaddr = NULL;
+	imem->vaddr_use -= nvkm_memory_size(&node->memory);
+	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
+		   imem->vaddr_use, imem->vaddr_max);
+
+out:
+	spin_unlock_irqrestore(&imem->lock, flags);
+}
+
+static void *
+gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 {
-	struct gk20a_instobj_dma *node = (void *)_node;
-	struct gk20a_instmem *imem = _node->imem;
+	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
+	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 
+	gk20a_instobj_dtor(&node->base);
+
 	if (unlikely(!node->cpuaddr))
-		return;
+		goto out;
 
-	dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
 		       node->handle, &imem->attrs);
+
+out:
+	return node;
 }
 
-static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
+static void *
+gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 {
-	struct gk20a_instobj_iommu *node = (void *)_node;
-	struct gk20a_instmem *imem = _node->imem;
+	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+	struct gk20a_instmem *imem = node->base.imem;
+	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r;
 	int i;
 
-	if (unlikely(list_empty(&_node->mem.regions)))
-		return;
+	gk20a_instobj_dtor(&node->base);
 
-	r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
+	if (unlikely(list_empty(&node->base.mem.regions)))
+		goto out;
+
+	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
 			     rl_entry);
 
 	/* clear bit 34 to unmap pages */
 	r->offset &= ~BIT(34 - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < _node->mem.size; i++) {
+	for (i = 0; i < node->base.mem.size; i++) {
 		iommu_unmap(imem->domain,
 			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
+		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
 		__free_page(node->pages[i]);
 	}
 
@@ -218,25 +348,27 @@ gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
 	mutex_lock(imem->mm_mutex);
 	nvkm_mm_free(imem->mm, &r);
 	mutex_unlock(imem->mm_mutex);
-}
-
-static void *
-gk20a_instobj_dtor(struct nvkm_memory *memory)
-{
-	struct gk20a_instobj *node = gk20a_instobj(memory);
-	struct gk20a_instmem *imem = node->imem;
-
-	if (imem->domain)
-		gk20a_instobj_dtor_iommu(node);
-	else
-		gk20a_instobj_dtor_dma(node);
 
+out:
 	return node;
 }
 
 static const struct nvkm_memory_func
-gk20a_instobj_func = {
-	.dtor = gk20a_instobj_dtor,
+gk20a_instobj_func_dma = {
+	.dtor = gk20a_instobj_dtor_dma,
+	.target = gk20a_instobj_target,
+	.addr = gk20a_instobj_addr,
+	.size = gk20a_instobj_size,
+	.acquire = gk20a_instobj_acquire,
+	.release = gk20a_instobj_release,
+	.rd32 = gk20a_instobj_rd32,
+	.wr32 = gk20a_instobj_wr32,
+	.map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_func
+gk20a_instobj_func_iommu = {
+	.dtor = gk20a_instobj_dtor_iommu,
 	.target = gk20a_instobj_target,
 	.addr = gk20a_instobj_addr,
 	.size = gk20a_instobj_size,
@@ -259,6 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 		return -ENOMEM;
 	*_node = &node->base;
 
+	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+
 	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					&node->handle, GFP_KERNEL,
 					&imem->attrs);
@@ -292,24 +426,40 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 {
 	struct gk20a_instobj_iommu *node;
 	struct nvkm_subdev *subdev = &imem->base.subdev;
+	struct device *dev = subdev->device->dev;
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
 
-	if (!(node = kzalloc(sizeof(*node) +
-			     sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+	/*
+	 * despite their variable size, instmem allocations are small enough
+	 * (< 1 page) to be handled by kzalloc
+	 */
+	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
+			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
 		return -ENOMEM;
 	*_node = &node->base;
+	node->dma_addrs = (void *)(node->pages + npages);
+
+	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
 		struct page *p = alloc_page(GFP_KERNEL);
+		dma_addr_t dma_adr;
 
 		if (p == NULL) {
 			ret = -ENOMEM;
 			goto free_pages;
 		}
 		node->pages[i] = p;
+		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_adr)) {
+			nvkm_error(subdev, "DMA mapping error!\n");
+			ret = -ENOMEM;
+			goto free_pages;
+		}
+		node->dma_addrs[i] = dma_adr;
 	}
 
 	mutex_lock(imem->mm_mutex);
@@ -318,16 +468,15 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 					align >> imem->iommu_pgshift, &r);
 	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nvkm_error(subdev, "virtual space is full!\n");
+		nvkm_error(subdev, "IOMMU space is full!\n");
 		goto free_pages;
 	}
 
 	/* Map into GPU address space */
 	for (i = 0; i < npages; i++) {
-		struct page *p = node->pages[i];
 		u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
-		ret = iommu_map(imem->domain, offset, page_to_phys(p),
+		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
 			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
@@ -356,8 +505,13 @@ release_area:
 	mutex_unlock(imem->mm_mutex);
 
 free_pages:
-	for (i = 0; i < npages && node->pages[i] != NULL; i++)
+	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
+		dma_addr_t dma_addr = node->dma_addrs[i];
+		if (dma_addr)
+			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
 		__free_page(node->pages[i]);
+	}
 
 	return ret;
 }
@@ -367,8 +521,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 		  struct nvkm_memory **pmemory)
 {
 	struct gk20a_instmem *imem = gk20a_instmem(base);
-	struct gk20a_instobj *node = NULL;
 	struct nvkm_subdev *subdev = &imem->base.subdev;
+	struct gk20a_instobj *node = NULL;
 	int ret;
 
 	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
@@ -388,7 +542,6 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	if (ret)
 		return ret;
 
-	nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
 	node->imem = imem;
 
 	/* present memory for being mapped using small pages */
@@ -402,15 +555,25 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	return 0;
 }
 
-static void
-gk20a_instmem_fini(struct nvkm_instmem *base)
+static void *
+gk20a_instmem_dtor(struct nvkm_instmem *base)
 {
-	gk20a_instmem(base)->addr = ~0ULL;
+	struct gk20a_instmem *imem = gk20a_instmem(base);
+
+	/* perform some sanity checks... */
+	if (!list_empty(&imem->vaddr_lru))
+		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");
+
+	if (imem->vaddr_use != 0)
+		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
+			  "0x%x bytes still mapped\n", imem->vaddr_use);
+
+	return imem;
 }
 
 static const struct nvkm_instmem_func
 gk20a_instmem = {
-	.fini = gk20a_instmem_fini,
+	.dtor = gk20a_instmem_dtor,
 	.memory_new = gk20a_instobj_new,
 	.persistent = true,
 	.zero = false,
@@ -429,23 +592,27 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	spin_lock_init(&imem->lock);
 	*pimem = &imem->base;
 
+	/* do not allow more than 1MB of CPU-mapped instmem */
+	imem->vaddr_use = 0;
+	imem->vaddr_max = 0x100000;
+	INIT_LIST_HEAD(&imem->vaddr_lru);
+
 	if (tdev->iommu.domain) {
-		imem->domain = tdev->iommu.domain;
+		imem->mm_mutex = &tdev->iommu.mutex;
 		imem->mm = &tdev->iommu.mm;
+		imem->domain = tdev->iommu.domain;
 		imem->iommu_pgshift = tdev->iommu.pgshift;
-		imem->mm_mutex = &tdev->iommu.mutex;
+		imem->cpu_map = gk20a_instobj_cpu_map_iommu;
 
 		nvkm_info(&imem->base.subdev, "using IOMMU\n");
 	} else {
 		init_dma_attrs(&imem->attrs);
-		/*
-		 * We will access instmem through PRAMIN and thus do not need a
-		 * consistent CPU pointer or kernel mapping
-		 */
+		/* We will access the memory through our own mapping */
 		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
 		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
 		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
+		imem->cpu_map = gk20a_instobj_cpu_map_dma;
 
 		nvkm_info(&imem->base.subdev, "using DMA API\n");
 	}
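
For reference, a minimal usage sketch (not part of the patch; the helper name is hypothetical) of how an instobj is normally touched through the nvkm_kmap()/nvkm_wo32()/nvkm_done() accessors from core/memory.h, which dispatch to the acquire/wr32/release hooks above. This is why the L2 flush/invalidate and the write-combined vmap handling live in acquire() and release() rather than in every rd32()/wr32():

	/* Hypothetical caller, for illustration only. */
	static void example_clear_instobj(struct nvkm_memory *memory)
	{
		u64 size = nvkm_memory_size(memory);
		u64 offset;

		nvkm_kmap(memory);	/* gk20a_instobj_acquire(): L2 flush, ensure vmap */
		for (offset = 0; offset < size; offset += 4)
			nvkm_wo32(memory, offset, 0x00000000);	/* plain store via WC mapping */
		nvkm_done(memory);	/* gk20a_instobj_release(): wmb() + L2 invalidate */
	}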