@@ -48,9 +48,9 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nouveau_fb *pfb = nvxx_fb(&drm->device);
-	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
-	struct nouveau_engine *engine;
+	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb_tile *tile = &pfb->tile.region[i];
+	struct nvkm_engine *engine;
 
 	nouveau_fence_unref(&reg->fence);
 
@@ -62,9 +62,9 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 
 	pfb->tile.prog(pfb, i, tile);
 
-	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
 		engine->tile_prog(engine, i);
-	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
 		engine->tile_prog(engine, i);
 }
 
@@ -105,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
@@ -325,7 +325,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
 			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nouveau_mem *mem = bo->mem.mm_node;
+				struct nvkm_mem *mem = bo->mem.mm_node;
 				if (!list_is_singular(&mem->regions))
 					evict = true;
 			}
@@ -459,7 +459,7 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -479,7 +479,7 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -695,7 +695,7 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
@@ -727,7 +727,7 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
@@ -765,7 +765,7 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
@@ -804,7 +804,7 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
@@ -842,7 +842,7 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
@@ -860,7 +860,7 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
@@ -894,12 +894,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
+	struct nvkm_mem *node = old_mem->mm_node;
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
 	int src_tiled = !!node->memtype;
-	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
+	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
 	int ret;
 
 	while (length) {
@@ -1036,25 +1036,25 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_mem_reg *mem)
 {
-	struct nouveau_mem *old_node = bo->mem.mm_node;
-	struct nouveau_mem *new_node = mem->mm_node;
+	struct nvkm_mem *old_node = bo->mem.mm_node;
+	struct nvkm_mem *new_node = mem->mm_node;
 	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
-			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
+	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
+			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
-			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
+	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
+			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
 	if (ret) {
-		nouveau_vm_put(&old_node->vma[0]);
+		nvkm_vm_put(&old_node->vma[0]);
 		return ret;
 	}
 
-	nouveau_vm_map(&old_node->vma[0], old_node);
-	nouveau_vm_map(&old_node->vma[1], new_node);
+	nvkm_vm_map(&old_node->vma[0], old_node);
+	nvkm_vm_map(&old_node->vma[1], new_node);
 	return 0;
 }
 
@@ -1069,7 +1069,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	int ret;
 
 	/* create temporary vmas for the transfer and attach them to the
-	 * old nouveau_mem node, these will get cleaned up after ttm has
+	 * old nvkm_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -1231,7 +1231,7 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_vma *vma;
+	struct nvkm_vma *vma;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
@@ -1241,9 +1241,9 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
 		    (new_mem->mem_type == TTM_PL_VRAM ||
 		     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
-			nouveau_vm_map(vma, new_mem->mm_node);
+			nvkm_vm_map(vma, new_mem->mm_node);
 		} else {
-			nouveau_vm_unmap(vma);
+			nvkm_vm_unmap(vma);
 		}
 	}
 }
@@ -1354,7 +1354,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_mem *node = mem->mm_node;
+	struct nvkm_mem *node = mem->mm_node;
 	int ret;
 
 	mem->bus.addr = NULL;
@@ -1385,7 +1385,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			struct nouveau_bar *bar = nvxx_bar(&drm->device);
+			struct nvkm_bar *bar = nvxx_bar(&drm->device);
 
 			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
 					&node->bar_vma);
@@ -1405,8 +1405,8 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_bar *bar = nvxx_bar(&drm->device);
-	struct nouveau_mem *node = mem->mm_node;
+	struct nvkm_bar *bar = nvxx_bar(&drm->device);
+	struct nvkm_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
@@ -1465,7 +1465,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nouveau_device *device;
+	struct nvkm_device *device;
 	struct drm_device *dev;
 	struct device *pdev;
 	unsigned i;
@@ -1539,7 +1539,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nouveau_device *device;
+	struct nvkm_device *device;
 	struct drm_device *dev;
 	struct device *pdev;
 	unsigned i;
@@ -1613,10 +1613,10 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
-struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+struct nvkm_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
 {
-	struct nouveau_vma *vma;
+	struct nvkm_vma *vma;
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
 		if (vma->vm == vm)
 			return vma;
@@ -1626,13 +1626,13 @@ nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
 }
 
 int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
-		   struct nouveau_vma *vma)
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
+		   struct nvkm_vma *vma)
 {
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
 			     NV_MEM_ACCESS_RW, vma);
 	if (ret)
 		return ret;
@@ -1640,7 +1640,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
 	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
 	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
-		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
@@ -1648,12 +1648,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 }
 
 void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 {
 	if (vma->node) {
 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
-			nouveau_vm_unmap(vma);
-		nouveau_vm_put(vma);
+			nvkm_vm_unmap(vma);
+		nvkm_vm_put(vma);
 		list_del(&vma->head);
 	}
 }