@@ -111,7 +111,7 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		u64 addr = chan->engn[engine->subdev.index].vma->addr;
 		nvkm_kmap(inst);
 		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
 		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
@@ -126,11 +126,7 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
-	if (vma->vm) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
+	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
@@ -150,13 +146,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(chan->base.vmm, chan->engn[engn].inst->size, 12,
-			  NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+			   &chan->engn[engn].vma);
 	if (ret)
 		return ret;
 
 	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
-			       &chan->engn[engn].vma, NULL, 0);
+			       chan->engn[engn].vma, NULL, 0);
 }
 
 static void
@@ -252,7 +248,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 				  (1ULL << NVKM_ENGINE_MSPPP) |
 				  (1ULL << NVKM_ENGINE_MSVLD) |
 				  (1ULL << NVKM_ENGINE_SW),
-				  1, fifo->user.bar.offset, 0x1000,
+				  1, fifo->user.bar->addr, 0x1000,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;